[BNX2]: Fix tx race condition.
drivers/net/bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
53
54 #include "bnx2.h"
55 #include "bnx2_fw.h"
56
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.43"
60 #define DRV_MODULE_RELDATE "June 28, 2006"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
66
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
86 BCM5708,
87 BCM5708S,
88 } board_t;
89
90 /* indexed by board_t, above */
91 static const struct {
92 char *name;
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 };
102
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
118 { 0, }
119 };
120
121 static struct flash_spec flash_table[] =
122 {
123 /* Slow EEPROM */
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
127 "EEPROM - slow"},
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
132 "Entry 0001"},
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 "Entry 0100"},
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
166 /* Fast EEPROM */
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
170 "EEPROM - fast"},
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
175 "Entry 1001"},
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 "Entry 1010"},
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1100"},
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 "Entry 1101"},
196 /* Atmel Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205 "Buffered flash (256kB)"},
206 };
207
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
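/* Returns the number of free tx BDs. The smp_mb() pairs with the
 * barrier in bnx2_tx_int() so that tx_prod/tx_cons are re-read after
 * any queue stop/wake check; without it, a stale tx_cons could leave
 * the queue stopped forever (the tx race this change fixes).
 */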
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211 {
212 u32 diff;
213
214 smp_mb();
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
219 }
220
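/* Indirect register access: write the target offset into the PCICFG
 * window address register, then move the data through the window
 * data register.
 */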
221 static u32
222 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
223 {
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
226 }
227
228 static void
229 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
230 {
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
233 }
234
235 static void
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
237 {
238 offset += cid_addr;
239 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240 REG_WR(bp, BNX2_CTX_DATA, val);
241 }
242
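/* MDIO read from the PHY. Hardware auto-polling, if active, is
 * paused around the manual access so the two cannot collide, and
 * completion is busy-waited for up to 50 x 10 usec.
 * bnx2_write_phy() below follows the same pattern.
 */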
243 static int
244 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
245 {
246 u32 val1;
247 int i, ret;
248
249 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
252
253 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
255
256 udelay(40);
257 }
258
259 val1 = (bp->phy_addr << 21) | (reg << 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261 BNX2_EMAC_MDIO_COMM_START_BUSY;
262 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
263
264 for (i = 0; i < 50; i++) {
265 udelay(10);
266
267 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
269 udelay(5);
270
271 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
273
274 break;
275 }
276 }
277
278 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
279 *val = 0x0;
280 ret = -EBUSY;
281 }
282 else {
283 *val = val1;
284 ret = 0;
285 }
286
287 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290
291 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
293
294 udelay(40);
295 }
296
297 return ret;
298 }
299
300 static int
301 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
302 {
303 u32 val1;
304 int i, ret;
305
306 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
309
310 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
312
313 udelay(40);
314 }
315
316 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
320
321 for (i = 0; i < 50; i++) {
322 udelay(10);
323
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
326 udelay(5);
327 break;
328 }
329 }
330
331 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
332 ret = -EBUSY;
333 else
334 ret = 0;
335
336 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
339
340 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
342
343 udelay(40);
344 }
345
346 return ret;
347 }
348
349 static void
350 bnx2_disable_int(struct bnx2 *bp)
351 {
352 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
355 }
356
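/* Re-enable interrupts: ack events up to last_status_idx with the
 * interrupt still masked, then unmask, and finally ask the host
 * coalescing block (COAL_NOW) to fire if work arrived in between.
 */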
357 static void
358 bnx2_enable_int(struct bnx2 *bp)
359 {
360 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
363
364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
366
367 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
368 }
369
370 static void
371 bnx2_disable_int_sync(struct bnx2 *bp)
372 {
373 atomic_inc(&bp->intr_sem);
374 bnx2_disable_int(bp);
375 synchronize_irq(bp->pdev->irq);
376 }
377
378 static void
379 bnx2_netif_stop(struct bnx2 *bp)
380 {
381 bnx2_disable_int_sync(bp);
382 if (netif_running(bp->dev)) {
383 netif_poll_disable(bp->dev);
384 netif_tx_disable(bp->dev);
385 bp->dev->trans_start = jiffies; /* prevent tx timeout */
386 }
387 }
388
389 static void
390 bnx2_netif_start(struct bnx2 *bp)
391 {
392 if (atomic_dec_and_test(&bp->intr_sem)) {
393 if (netif_running(bp->dev)) {
394 netif_wake_queue(bp->dev);
395 netif_poll_enable(bp->dev);
396 bnx2_enable_int(bp);
397 }
398 }
399 }
400
401 static void
402 bnx2_free_mem(struct bnx2 *bp)
403 {
404 int i;
405
406 if (bp->status_blk) {
407 pci_free_consistent(bp->pdev, bp->status_stats_size,
408 bp->status_blk, bp->status_blk_mapping);
409 bp->status_blk = NULL;
410 bp->stats_blk = NULL;
411 }
412 if (bp->tx_desc_ring) {
413 pci_free_consistent(bp->pdev,
414 sizeof(struct tx_bd) * TX_DESC_CNT,
415 bp->tx_desc_ring, bp->tx_desc_mapping);
416 bp->tx_desc_ring = NULL;
417 }
418 kfree(bp->tx_buf_ring);
419 bp->tx_buf_ring = NULL;
420 for (i = 0; i < bp->rx_max_ring; i++) {
421 if (bp->rx_desc_ring[i])
422 pci_free_consistent(bp->pdev,
423 sizeof(struct rx_bd) * RX_DESC_CNT,
424 bp->rx_desc_ring[i],
425 bp->rx_desc_mapping[i]);
426 bp->rx_desc_ring[i] = NULL;
427 }
428 vfree(bp->rx_buf_ring);
429 bp->rx_buf_ring = NULL;
430 }
431
432 static int
433 bnx2_alloc_mem(struct bnx2 *bp)
434 {
435 int i, status_blk_size;
436
437 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
438 GFP_KERNEL);
439 if (bp->tx_buf_ring == NULL)
440 return -ENOMEM;
441
442 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443 sizeof(struct tx_bd) *
444 TX_DESC_CNT,
445 &bp->tx_desc_mapping);
446 if (bp->tx_desc_ring == NULL)
447 goto alloc_mem_err;
448
449 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
450 bp->rx_max_ring);
451 if (bp->rx_buf_ring == NULL)
452 goto alloc_mem_err;
453
454 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
455 bp->rx_max_ring);
456
457 for (i = 0; i < bp->rx_max_ring; i++) {
458 bp->rx_desc_ring[i] =
459 pci_alloc_consistent(bp->pdev,
460 sizeof(struct rx_bd) * RX_DESC_CNT,
461 &bp->rx_desc_mapping[i]);
462 if (bp->rx_desc_ring[i] == NULL)
463 goto alloc_mem_err;
464
465 }
466
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469 bp->status_stats_size = status_blk_size +
470 sizeof(struct statistics_block);
471
472 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
473 &bp->status_blk_mapping);
474 if (bp->status_blk == NULL)
475 goto alloc_mem_err;
476
477 memset(bp->status_blk, 0, bp->status_stats_size);
478
479 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
480 status_blk_size);
481
482 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
483
484 return 0;
485
486 alloc_mem_err:
487 bnx2_free_mem(bp);
488 return -ENOMEM;
489 }
490
491 static void
492 bnx2_report_fw_link(struct bnx2 *bp)
493 {
494 u32 fw_link_status = 0;
495
496 if (bp->link_up) {
497 u32 bmsr;
498
499 switch (bp->line_speed) {
500 case SPEED_10:
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
503 else
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
505 break;
506 case SPEED_100:
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
509 else
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
511 break;
512 case SPEED_1000:
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
515 else
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
517 break;
518 case SPEED_2500:
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
521 else
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
523 break;
524 }
525
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
527
528 if (bp->autoneg) {
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
530
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
533
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
537 else
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
539 }
540 }
541 else
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
543
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
545 }
546
547 static void
548 bnx2_report_link(struct bnx2 *bp)
549 {
550 if (bp->link_up) {
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
553
554 printk("%d Mbps ", bp->line_speed);
555
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
558 else
559 printk("half duplex");
560
561 if (bp->flow_ctrl) {
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
566 }
567 else {
568 printk(", transmit ");
569 }
570 printk("flow control ON");
571 }
572 printk("\n");
573 }
574 else {
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
577 }
578
579 bnx2_report_fw_link(bp);
580 }
581
582 static void
583 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
584 {
585 u32 local_adv, remote_adv;
586
587 bp->flow_ctrl = 0;
588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
590
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
593 }
594 return;
595 }
596
597 if (bp->duplex != DUPLEX_FULL) {
598 return;
599 }
600
601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
603 u32 val;
604
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
610 return;
611 }
612
613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
615
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
619
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
628
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
631 }
632
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if (local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
638 }
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
641 }
642 }
643 else {
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
646 }
647 }
648 }
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
652
653 bp->flow_ctrl = FLOW_CTRL_TX;
654 }
655 }
656 }
657
658 static int
659 bnx2_5708s_linkup(struct bnx2 *bp)
660 {
661 u32 val;
662
663 bp->link_up = 1;
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
668 break;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
671 break;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
674 break;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
677 break;
678 }
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
681 else
682 bp->duplex = DUPLEX_HALF;
683
684 return 0;
685 }
686
687 static int
688 bnx2_5706s_linkup(struct bnx2 *bp)
689 {
690 u32 bmcr, local_adv, remote_adv, common;
691
692 bp->link_up = 1;
693 bp->line_speed = SPEED_1000;
694
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
698 }
699 else {
700 bp->duplex = DUPLEX_HALF;
701 }
702
703 if (!(bmcr & BMCR_ANENABLE)) {
704 return 0;
705 }
706
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
709
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
715 }
716 else {
717 bp->duplex = DUPLEX_HALF;
718 }
719 }
720
721 return 0;
722 }
723
724 static int
725 bnx2_copper_linkup(struct bnx2 *bp)
726 {
727 u32 bmcr;
728
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
732
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
735
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
740 }
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
744 }
745 else {
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
748
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
753 }
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
757 }
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
761 }
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
765 }
766 else {
767 bp->line_speed = 0;
768 bp->link_up = 0;
769 }
770 }
771 }
772 else {
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
775 }
776 else {
777 bp->line_speed = SPEED_10;
778 }
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
781 }
782 else {
783 bp->duplex = DUPLEX_HALF;
784 }
785 }
786
787 return 0;
788 }
789
790 static int
791 bnx2_set_mac_link(struct bnx2 *bp)
792 {
793 u32 val;
794
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
799 }
800
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
803
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
806 BNX2_EMAC_MODE_25G);
807
808 if (bp->link_up) {
809 switch (bp->line_speed) {
810 case SPEED_10:
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
813 break;
814 }
815 /* fall through */
816 case SPEED_100:
817 val |= BNX2_EMAC_MODE_PORT_MII;
818 break;
819 case SPEED_2500:
820 val |= BNX2_EMAC_MODE_25G;
821 /* fall through */
822 case SPEED_1000:
823 val |= BNX2_EMAC_MODE_PORT_GMII;
824 break;
825 }
826 }
827 else {
828 val |= BNX2_EMAC_MODE_PORT_GMII;
829 }
830
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
835
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
838
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
842
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
846
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
850
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
853
854 return 0;
855 }
856
857 static int
858 bnx2_set_link(struct bnx2 *bp)
859 {
860 u32 bmsr;
861 u8 link_up;
862
863 if (bp->loopback == MAC_LOOPBACK) {
864 bp->link_up = 1;
865 return 0;
866 }
867
868 link_up = bp->link_up;
869
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
872
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
875 u32 val;
876
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
880 else
881 bmsr &= ~BMSR_LSTATUS;
882 }
883
884 if (bmsr & BMSR_LSTATUS) {
885 bp->link_up = 1;
886
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
892 }
893 else {
894 bnx2_copper_linkup(bp);
895 }
896 bnx2_resolve_flow_ctrl(bp);
897 }
898 else {
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
901
902 u32 bmcr;
903
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
905 if (!(bmcr & BMCR_ANENABLE)) {
906 bnx2_write_phy(bp, MII_BMCR, bmcr |
907 BMCR_ANENABLE);
908 }
909 }
910 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
911 bp->link_up = 0;
912 }
913
914 if (bp->link_up != link_up) {
915 bnx2_report_link(bp);
916 }
917
918 bnx2_set_mac_link(bp);
919
920 return 0;
921 }
922
923 static int
924 bnx2_reset_phy(struct bnx2 *bp)
925 {
926 int i;
927 u32 reg;
928
929 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
930
931 #define PHY_RESET_MAX_WAIT 100
932 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
933 udelay(10);
934
935 bnx2_read_phy(bp, MII_BMCR, &reg);
936 if (!(reg & BMCR_RESET)) {
937 udelay(20);
938 break;
939 }
940 }
941 if (i == PHY_RESET_MAX_WAIT) {
942 return -EBUSY;
943 }
944 return 0;
945 }
946
947 static u32
948 bnx2_phy_get_pause_adv(struct bnx2 *bp)
949 {
950 u32 adv = 0;
951
952 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
953 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
954
955 if (bp->phy_flags & PHY_SERDES_FLAG) {
956 adv = ADVERTISE_1000XPAUSE;
957 }
958 else {
959 adv = ADVERTISE_PAUSE_CAP;
960 }
961 }
962 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
963 if (bp->phy_flags & PHY_SERDES_FLAG) {
964 adv = ADVERTISE_1000XPSE_ASYM;
965 }
966 else {
967 adv = ADVERTISE_PAUSE_ASYM;
968 }
969 }
970 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
971 if (bp->phy_flags & PHY_SERDES_FLAG) {
972 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
973 }
974 else {
975 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
976 }
977 }
978 return adv;
979 }
980
981 static int
982 bnx2_setup_serdes_phy(struct bnx2 *bp)
983 {
984 u32 adv, bmcr, up1;
985 u32 new_adv = 0;
986
987 if (!(bp->autoneg & AUTONEG_SPEED)) {
988 u32 new_bmcr;
989 int force_link_down = 0;
990
991 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
992 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
993 if (up1 & BCM5708S_UP1_2G5) {
994 up1 &= ~BCM5708S_UP1_2G5;
995 bnx2_write_phy(bp, BCM5708S_UP1, up1);
996 force_link_down = 1;
997 }
998 }
999
1000 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1001 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1002
1003 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1004 new_bmcr = bmcr & ~BMCR_ANENABLE;
1005 new_bmcr |= BMCR_SPEED1000;
1006 if (bp->req_duplex == DUPLEX_FULL) {
1007 adv |= ADVERTISE_1000XFULL;
1008 new_bmcr |= BMCR_FULLDPLX;
1009 }
1010 else {
1011 adv |= ADVERTISE_1000XHALF;
1012 new_bmcr &= ~BMCR_FULLDPLX;
1013 }
1014 if ((new_bmcr != bmcr) || (force_link_down)) {
1015 /* Force a link down visible on the other side */
1016 if (bp->link_up) {
1017 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1018 ~(ADVERTISE_1000XFULL |
1019 ADVERTISE_1000XHALF));
1020 bnx2_write_phy(bp, MII_BMCR, bmcr |
1021 BMCR_ANRESTART | BMCR_ANENABLE);
1022
1023 bp->link_up = 0;
1024 netif_carrier_off(bp->dev);
1025 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1026 }
1027 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1028 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1029 }
1030 return 0;
1031 }
1032
1033 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1034 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1035 up1 |= BCM5708S_UP1_2G5;
1036 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1037 }
1038
1039 if (bp->advertising & ADVERTISED_1000baseT_Full)
1040 new_adv |= ADVERTISE_1000XFULL;
1041
1042 new_adv |= bnx2_phy_get_pause_adv(bp);
1043
1044 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1045 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1046
1047 bp->serdes_an_pending = 0;
1048 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1049 /* Force a link down visible on the other side */
1050 if (bp->link_up) {
1051 int i;
1052
1053 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1054 for (i = 0; i < 110; i++) {
1055 udelay(100);
1056 }
1057 }
1058
1059 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1060 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1061 BMCR_ANENABLE);
1062 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1063 /* Speed up link-up time when the link partner
1064 * does not autonegotiate, which is very common
1065 * in blade servers. Some blade servers use
1066 * IPMI for keyboard input and it's important
1067 * to minimize link disruptions. Autoneg. involves
1068 * exchanging base pages plus 3 next pages and
1069 * normally completes in about 120 msec.
1070 */
1071 bp->current_interval = SERDES_AN_TIMEOUT;
1072 bp->serdes_an_pending = 1;
1073 mod_timer(&bp->timer, jiffies + bp->current_interval);
1074 }
1075 }
1076
1077 return 0;
1078 }
1079
1080 #define ETHTOOL_ALL_FIBRE_SPEED \
1081 (ADVERTISED_1000baseT_Full)
1082
1083 #define ETHTOOL_ALL_COPPER_SPEED \
1084 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1085 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1086 ADVERTISED_1000baseT_Full)
1087
1088 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1089 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1090
1091 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1092
1093 static int
1094 bnx2_setup_copper_phy(struct bnx2 *bp)
1095 {
1096 u32 bmcr;
1097 u32 new_bmcr;
1098
1099 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1100
1101 if (bp->autoneg & AUTONEG_SPEED) {
1102 u32 adv_reg, adv1000_reg;
1103 u32 new_adv_reg = 0;
1104 u32 new_adv1000_reg = 0;
1105
1106 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1107 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1108 ADVERTISE_PAUSE_ASYM);
1109
1110 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1111 adv1000_reg &= PHY_ALL_1000_SPEED;
1112
1113 if (bp->advertising & ADVERTISED_10baseT_Half)
1114 new_adv_reg |= ADVERTISE_10HALF;
1115 if (bp->advertising & ADVERTISED_10baseT_Full)
1116 new_adv_reg |= ADVERTISE_10FULL;
1117 if (bp->advertising & ADVERTISED_100baseT_Half)
1118 new_adv_reg |= ADVERTISE_100HALF;
1119 if (bp->advertising & ADVERTISED_100baseT_Full)
1120 new_adv_reg |= ADVERTISE_100FULL;
1121 if (bp->advertising & ADVERTISED_1000baseT_Full)
1122 new_adv1000_reg |= ADVERTISE_1000FULL;
1123
1124 new_adv_reg |= ADVERTISE_CSMA;
1125
1126 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1127
1128 if ((adv1000_reg != new_adv1000_reg) ||
1129 (adv_reg != new_adv_reg) ||
1130 ((bmcr & BMCR_ANENABLE) == 0)) {
1131
1132 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1133 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1134 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1135 BMCR_ANENABLE);
1136 }
1137 else if (bp->link_up) {
1138 /* Flow ctrl may have changed from auto to forced */
1139 /* or vice-versa. */
1140
1141 bnx2_resolve_flow_ctrl(bp);
1142 bnx2_set_mac_link(bp);
1143 }
1144 return 0;
1145 }
1146
1147 new_bmcr = 0;
1148 if (bp->req_line_speed == SPEED_100) {
1149 new_bmcr |= BMCR_SPEED100;
1150 }
1151 if (bp->req_duplex == DUPLEX_FULL) {
1152 new_bmcr |= BMCR_FULLDPLX;
1153 }
1154 if (new_bmcr != bmcr) {
1155 u32 bmsr;
1156 int i = 0;
1157
1158 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1159 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1160
1161 if (bmsr & BMSR_LSTATUS) {
1162 /* Force link down */
1163 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1164 do {
1165 udelay(100);
1166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1167 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1168 i++;
1169 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1170 }
1171
1172 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1173
1174 /* Normally, the new speed is set up after the link has
1175 * gone down and up again. In some cases, link will not go
1176 * down so we need to set up the new speed here.
1177 */
1178 if (bmsr & BMSR_LSTATUS) {
1179 bp->line_speed = bp->req_line_speed;
1180 bp->duplex = bp->req_duplex;
1181 bnx2_resolve_flow_ctrl(bp);
1182 bnx2_set_mac_link(bp);
1183 }
1184 }
1185 return 0;
1186 }
1187
1188 static int
1189 bnx2_setup_phy(struct bnx2 *bp)
1190 {
1191 if (bp->loopback == MAC_LOOPBACK)
1192 return 0;
1193
1194 if (bp->phy_flags & PHY_SERDES_FLAG) {
1195 return (bnx2_setup_serdes_phy(bp));
1196 }
1197 else {
1198 return (bnx2_setup_copper_phy(bp));
1199 }
1200 }
1201
1202 static int
1203 bnx2_init_5708s_phy(struct bnx2 *bp)
1204 {
1205 u32 val;
1206
1207 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1208 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1209 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1210
1211 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1212 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1213 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1214
1215 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1216 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1217 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1218
1219 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1220 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1221 val |= BCM5708S_UP1_2G5;
1222 bnx2_write_phy(bp, BCM5708S_UP1, val);
1223 }
1224
1225 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1226 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1227 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1228 /* increase tx signal amplitude */
1229 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1230 BCM5708S_BLK_ADDR_TX_MISC);
1231 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1232 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1233 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1234 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1235 }
1236
1237 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1238 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1239
1240 if (val) {
1241 u32 is_backplane;
1242
1243 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1244 BNX2_SHARED_HW_CFG_CONFIG);
1245 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1247 BCM5708S_BLK_ADDR_TX_MISC);
1248 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1250 BCM5708S_BLK_ADDR_DIG);
1251 }
1252 }
1253 return 0;
1254 }
1255
1256 static int
1257 bnx2_init_5706s_phy(struct bnx2 *bp)
1258 {
1259 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1260
1261 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1262 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1263 }
1264
1265 if (bp->dev->mtu > 1500) {
1266 u32 val;
1267
1268 /* Set extended packet length bit */
1269 bnx2_write_phy(bp, 0x18, 0x7);
1270 bnx2_read_phy(bp, 0x18, &val);
1271 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1272
1273 bnx2_write_phy(bp, 0x1c, 0x6c00);
1274 bnx2_read_phy(bp, 0x1c, &val);
1275 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1276 }
1277 else {
1278 u32 val;
1279
1280 bnx2_write_phy(bp, 0x18, 0x7);
1281 bnx2_read_phy(bp, 0x18, &val);
1282 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1283
1284 bnx2_write_phy(bp, 0x1c, 0x6c00);
1285 bnx2_read_phy(bp, 0x1c, &val);
1286 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1287 }
1288
1289 return 0;
1290 }
1291
1292 static int
1293 bnx2_init_copper_phy(struct bnx2 *bp)
1294 {
1295 u32 val;
1296
1297 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1298
1299 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1300 bnx2_write_phy(bp, 0x18, 0x0c00);
1301 bnx2_write_phy(bp, 0x17, 0x000a);
1302 bnx2_write_phy(bp, 0x15, 0x310b);
1303 bnx2_write_phy(bp, 0x17, 0x201f);
1304 bnx2_write_phy(bp, 0x15, 0x9506);
1305 bnx2_write_phy(bp, 0x17, 0x401f);
1306 bnx2_write_phy(bp, 0x15, 0x14e2);
1307 bnx2_write_phy(bp, 0x18, 0x0400);
1308 }
1309
1310 if (bp->dev->mtu > 1500) {
1311 /* Set extended packet length bit */
1312 bnx2_write_phy(bp, 0x18, 0x7);
1313 bnx2_read_phy(bp, 0x18, &val);
1314 bnx2_write_phy(bp, 0x18, val | 0x4000);
1315
1316 bnx2_read_phy(bp, 0x10, &val);
1317 bnx2_write_phy(bp, 0x10, val | 0x1);
1318 }
1319 else {
1320 bnx2_write_phy(bp, 0x18, 0x7);
1321 bnx2_read_phy(bp, 0x18, &val);
1322 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1323
1324 bnx2_read_phy(bp, 0x10, &val);
1325 bnx2_write_phy(bp, 0x10, val & ~0x1);
1326 }
1327
1328 /* ethernet@wirespeed */
1329 bnx2_write_phy(bp, 0x18, 0x7007);
1330 bnx2_read_phy(bp, 0x18, &val);
1331 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1332 return 0;
1333 }
1334
1335
1336 static int
1337 bnx2_init_phy(struct bnx2 *bp)
1338 {
1339 u32 val;
1340 int rc = 0;
1341
1342 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1343 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1344
1345 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1346
1347 bnx2_reset_phy(bp);
1348
1349 bnx2_read_phy(bp, MII_PHYSID1, &val);
1350 bp->phy_id = val << 16;
1351 bnx2_read_phy(bp, MII_PHYSID2, &val);
1352 bp->phy_id |= val & 0xffff;
1353
1354 if (bp->phy_flags & PHY_SERDES_FLAG) {
1355 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1356 rc = bnx2_init_5706s_phy(bp);
1357 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1358 rc = bnx2_init_5708s_phy(bp);
1359 }
1360 else {
1361 rc = bnx2_init_copper_phy(bp);
1362 }
1363
1364 bnx2_setup_phy(bp);
1365
1366 return rc;
1367 }
1368
1369 static int
1370 bnx2_set_mac_loopback(struct bnx2 *bp)
1371 {
1372 u32 mac_mode;
1373
1374 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1375 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1376 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1377 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1378 bp->link_up = 1;
1379 return 0;
1380 }
1381
1382 static int bnx2_test_link(struct bnx2 *);
1383
1384 static int
1385 bnx2_set_phy_loopback(struct bnx2 *bp)
1386 {
1387 u32 mac_mode;
1388 int rc, i;
1389
1390 spin_lock_bh(&bp->phy_lock);
1391 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1392 BMCR_SPEED1000);
1393 spin_unlock_bh(&bp->phy_lock);
1394 if (rc)
1395 return rc;
1396
1397 for (i = 0; i < 10; i++) {
1398 if (bnx2_test_link(bp) == 0)
1399 break;
1400 udelay(10);
1401 }
1402
1403 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1404 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1405 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1406 BNX2_EMAC_MODE_25G);
1407
1408 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1409 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1410 bp->link_up = 1;
1411 return 0;
1412 }
1413
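/* Driver/bootcode handshake via shared memory: post a message tagged
 * with an incrementing sequence number to the driver mailbox, then
 * poll the firmware mailbox for the matching ACK. On timeout the
 * firmware is notified with BNX2_DRV_MSG_CODE_FW_TIMEOUT.
 */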
1414 static int
1415 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1416 {
1417 int i;
1418 u32 val;
1419
1420 bp->fw_wr_seq++;
1421 msg_data |= bp->fw_wr_seq;
1422
1423 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1424
1425 /* wait for an acknowledgement. */
1426 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1427 msleep(10);
1428
1429 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1430
1431 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1432 break;
1433 }
1434 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1435 return 0;
1436
1437 /* If we timed out, inform the firmware that this is the case. */
1438 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1439 if (!silent)
1440 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1441 "%x\n", msg_data);
1442
1443 msg_data &= ~BNX2_DRV_MSG_CODE;
1444 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1445
1446 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1447
1448 return -EBUSY;
1449 }
1450
1451 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1452 return -EIO;
1453
1454 return 0;
1455 }
1456
1457 static void
1458 bnx2_init_context(struct bnx2 *bp)
1459 {
1460 u32 vcid;
1461
1462 vcid = 96;
1463 while (vcid) {
1464 u32 vcid_addr, pcid_addr, offset;
1465
1466 vcid--;
1467
1468 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1469 u32 new_vcid;
1470
1471 vcid_addr = GET_PCID_ADDR(vcid);
1472 if (vcid & 0x8) {
1473 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1474 }
1475 else {
1476 new_vcid = vcid;
1477 }
1478 pcid_addr = GET_PCID_ADDR(new_vcid);
1479 }
1480 else {
1481 vcid_addr = GET_CID_ADDR(vcid);
1482 pcid_addr = vcid_addr;
1483 }
1484
1485 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1486 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1487
1488 /* Zero out the context. */
1489 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1490 CTX_WR(bp, 0x00, offset, 0);
1491 }
1492
1493 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1494 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1495 }
1496 }
1497
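/* Work around bad blocks in the on-chip rx buffer memory: allocate
 * every mbuf the RBUF allocator will hand out, remember the good
 * ones (bit 9 clear), and free only those back so the bad blocks
 * remain permanently allocated and unused.
 */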
1498 static int
1499 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1500 {
1501 u16 *good_mbuf;
1502 u32 good_mbuf_cnt;
1503 u32 val;
1504
1505 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1506 if (good_mbuf == NULL) {
1507 printk(KERN_ERR PFX "Failed to allocate memory in "
1508 "bnx2_alloc_bad_rbuf\n");
1509 return -ENOMEM;
1510 }
1511
1512 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1513 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1514
1515 good_mbuf_cnt = 0;
1516
1517 /* Allocate a bunch of mbufs and save the good ones in an array. */
1518 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1519 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1520 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1521
1522 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1523
1524 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1525
1526 /* The addresses with Bit 9 set are bad memory blocks. */
1527 if (!(val & (1 << 9))) {
1528 good_mbuf[good_mbuf_cnt] = (u16) val;
1529 good_mbuf_cnt++;
1530 }
1531
1532 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1533 }
1534
1535 /* Free the good ones back to the mbuf pool thus discarding
1536 * all the bad ones. */
1537 while (good_mbuf_cnt) {
1538 good_mbuf_cnt--;
1539
1540 val = good_mbuf[good_mbuf_cnt];
1541 val = (val << 9) | val | 1;
1542
1543 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1544 }
1545 kfree(good_mbuf);
1546 return 0;
1547 }
1548
1549 static void
1550 bnx2_set_mac_addr(struct bnx2 *bp)
1551 {
1552 u32 val;
1553 u8 *mac_addr = bp->dev->dev_addr;
1554
1555 val = (mac_addr[0] << 8) | mac_addr[1];
1556
1557 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1558
1559 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1560 (mac_addr[4] << 8) | mac_addr[5];
1561
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1563 }
1564
1565 static inline int
1566 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1567 {
1568 struct sk_buff *skb;
1569 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1570 dma_addr_t mapping;
1571 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1572 unsigned long align;
1573
1574 skb = dev_alloc_skb(bp->rx_buf_size);
1575 if (skb == NULL) {
1576 return -ENOMEM;
1577 }
1578
1579 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1580 skb_reserve(skb, 8 - align);
1581 }
1582
1583 skb->dev = bp->dev;
1584 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1585 PCI_DMA_FROMDEVICE);
1586
1587 rx_buf->skb = skb;
1588 pci_unmap_addr_set(rx_buf, mapping, mapping);
1589
1590 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1591 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1592
1593 bp->rx_prod_bseq += bp->rx_buf_use_size;
1594
1595 return 0;
1596 }
1597
1598 static void
1599 bnx2_phy_int(struct bnx2 *bp)
1600 {
1601 u32 new_link_state, old_link_state;
1602
1603 new_link_state = bp->status_blk->status_attn_bits &
1604 STATUS_ATTN_BITS_LINK_STATE;
1605 old_link_state = bp->status_blk->status_attn_bits_ack &
1606 STATUS_ATTN_BITS_LINK_STATE;
1607 if (new_link_state != old_link_state) {
1608 if (new_link_state) {
1609 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1610 STATUS_ATTN_BITS_LINK_STATE);
1611 }
1612 else {
1613 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1614 STATUS_ATTN_BITS_LINK_STATE);
1615 }
1616 bnx2_set_link(bp);
1617 }
1618 }
1619
1620 static void
1621 bnx2_tx_int(struct bnx2 *bp)
1622 {
1623 struct status_block *sblk = bp->status_blk;
1624 u16 hw_cons, sw_cons, sw_ring_cons;
1625 int tx_free_bd = 0;
1626
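/* The last BD of each ring page is a chain pointer to the next page,
 * so a hardware consumer index that lands on it is bumped past it
 * (here and after each completion below).
 */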
1627 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1628 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1629 hw_cons++;
1630 }
1631 sw_cons = bp->tx_cons;
1632
1633 while (sw_cons != hw_cons) {
1634 struct sw_bd *tx_buf;
1635 struct sk_buff *skb;
1636 int i, last;
1637
1638 sw_ring_cons = TX_RING_IDX(sw_cons);
1639
1640 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1641 skb = tx_buf->skb;
1642 #ifdef BCM_TSO
1643 /* partial BD completions possible with TSO packets */
1644 if (skb_is_gso(skb)) {
1645 u16 last_idx, last_ring_idx;
1646
1647 last_idx = sw_cons +
1648 skb_shinfo(skb)->nr_frags + 1;
1649 last_ring_idx = sw_ring_cons +
1650 skb_shinfo(skb)->nr_frags + 1;
1651 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1652 last_idx++;
1653 }
1654 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1655 break;
1656 }
1657 }
1658 #endif
1659 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1660 skb_headlen(skb), PCI_DMA_TODEVICE);
1661
1662 tx_buf->skb = NULL;
1663 last = skb_shinfo(skb)->nr_frags;
1664
1665 for (i = 0; i < last; i++) {
1666 sw_cons = NEXT_TX_BD(sw_cons);
1667
1668 pci_unmap_page(bp->pdev,
1669 pci_unmap_addr(
1670 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1671 mapping),
1672 skb_shinfo(skb)->frags[i].size,
1673 PCI_DMA_TODEVICE);
1674 }
1675
1676 sw_cons = NEXT_TX_BD(sw_cons);
1677
1678 tx_free_bd += last + 1;
1679
1680 dev_kfree_skb(skb);
1681
1682 hw_cons = bp->hw_tx_cons =
1683 sblk->status_tx_quick_consumer_index0;
1684
1685 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1686 hw_cons++;
1687 }
1688 }
1689
1690 bp->tx_cons = sw_cons;
1691 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1692 * before checking for netif_queue_stopped(). Without the
1693 * memory barrier, there is a small possibility that bnx2_start_xmit()
1694 * will miss it and cause the queue to be stopped forever.
1695 */
1696 smp_mb();
1697
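/* Re-check availability under netif_tx_lock so the wakeup cannot
 * race with a concurrent bnx2_start_xmit() stopping the queue
 * (check / lock / re-check).
 */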
1698 if (unlikely(netif_queue_stopped(bp->dev)) &&
1699 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1700 netif_tx_lock(bp->dev);
1701 if ((netif_queue_stopped(bp->dev)) &&
1702 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1703 netif_wake_queue(bp->dev);
1704 netif_tx_unlock(bp->dev);
1705 }
1706 }
1707
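/* Recycle an rx skb on the error/copy path: move the skb and its DMA
 * mapping from the consumer slot to the producer slot so the buffer
 * goes straight back to the hardware without a fresh allocation.
 */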
1708 static inline void
1709 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1710 u16 cons, u16 prod)
1711 {
1712 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1713 struct rx_bd *cons_bd, *prod_bd;
1714
1715 cons_rx_buf = &bp->rx_buf_ring[cons];
1716 prod_rx_buf = &bp->rx_buf_ring[prod];
1717
1718 pci_dma_sync_single_for_device(bp->pdev,
1719 pci_unmap_addr(cons_rx_buf, mapping),
1720 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1721
1722 bp->rx_prod_bseq += bp->rx_buf_use_size;
1723
1724 prod_rx_buf->skb = skb;
1725
1726 if (cons == prod)
1727 return;
1728
1729 pci_unmap_addr_set(prod_rx_buf, mapping,
1730 pci_unmap_addr(cons_rx_buf, mapping));
1731
1732 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1733 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1734 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1735 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1736 }
1737
1738 static int
1739 bnx2_rx_int(struct bnx2 *bp, int budget)
1740 {
1741 struct status_block *sblk = bp->status_blk;
1742 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1743 struct l2_fhdr *rx_hdr;
1744 int rx_pkt = 0;
1745
1746 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1747 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1748 hw_cons++;
1749 }
1750 sw_cons = bp->rx_cons;
1751 sw_prod = bp->rx_prod;
1752
1753 /* Memory barrier necessary as speculative reads of the rx
1754 * buffer can be ahead of the index in the status block
1755 */
1756 rmb();
1757 while (sw_cons != hw_cons) {
1758 unsigned int len;
1759 u32 status;
1760 struct sw_bd *rx_buf;
1761 struct sk_buff *skb;
1762 dma_addr_t dma_addr;
1763
1764 sw_ring_cons = RX_RING_IDX(sw_cons);
1765 sw_ring_prod = RX_RING_IDX(sw_prod);
1766
1767 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1768 skb = rx_buf->skb;
1769
1770 rx_buf->skb = NULL;
1771
1772 dma_addr = pci_unmap_addr(rx_buf, mapping);
1773
1774 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1775 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1776
1777 rx_hdr = (struct l2_fhdr *) skb->data;
1778 len = rx_hdr->l2_fhdr_pkt_len - 4;
1779
1780 if ((status = rx_hdr->l2_fhdr_status) &
1781 (L2_FHDR_ERRORS_BAD_CRC |
1782 L2_FHDR_ERRORS_PHY_DECODE |
1783 L2_FHDR_ERRORS_ALIGNMENT |
1784 L2_FHDR_ERRORS_TOO_SHORT |
1785 L2_FHDR_ERRORS_GIANT_FRAME)) {
1786
1787 goto reuse_rx;
1788 }
1789
1790 /* Since we don't have a jumbo ring, copy small packets
1791 * if mtu > 1500
1792 */
1793 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1794 struct sk_buff *new_skb;
1795
1796 new_skb = dev_alloc_skb(len + 2);
1797 if (new_skb == NULL)
1798 goto reuse_rx;
1799
1800 /* aligned copy */
1801 memcpy(new_skb->data,
1802 skb->data + bp->rx_offset - 2,
1803 len + 2);
1804
1805 skb_reserve(new_skb, 2);
1806 skb_put(new_skb, len);
1807 new_skb->dev = bp->dev;
1808
1809 bnx2_reuse_rx_skb(bp, skb,
1810 sw_ring_cons, sw_ring_prod);
1811
1812 skb = new_skb;
1813 }
1814 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1815 pci_unmap_single(bp->pdev, dma_addr,
1816 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1817
1818 skb_reserve(skb, bp->rx_offset);
1819 skb_put(skb, len);
1820 }
1821 else {
1822 reuse_rx:
1823 bnx2_reuse_rx_skb(bp, skb,
1824 sw_ring_cons, sw_ring_prod);
1825 goto next_rx;
1826 }
1827
1828 skb->protocol = eth_type_trans(skb, bp->dev);
1829
1830 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1831 (ntohs(skb->protocol) != 0x8100)) {
1832
1833 dev_kfree_skb(skb);
1834 goto next_rx;
1835
1836 }
1837
1838 skb->ip_summed = CHECKSUM_NONE;
1839 if (bp->rx_csum &&
1840 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1841 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1842
1843 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1844 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1845 skb->ip_summed = CHECKSUM_UNNECESSARY;
1846 }
1847
1848 #ifdef BCM_VLAN
1849 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1850 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1851 rx_hdr->l2_fhdr_vlan_tag);
1852 }
1853 else
1854 #endif
1855 netif_receive_skb(skb);
1856
1857 bp->dev->last_rx = jiffies;
1858 rx_pkt++;
1859
1860 next_rx:
1861 sw_cons = NEXT_RX_BD(sw_cons);
1862 sw_prod = NEXT_RX_BD(sw_prod);
1863
1864 if (rx_pkt == budget)
1865 break;
1866
1867 /* Refresh hw_cons to see if there is new work */
1868 if (sw_cons == hw_cons) {
1869 hw_cons = bp->hw_rx_cons =
1870 sblk->status_rx_quick_consumer_index0;
1871 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1872 hw_cons++;
1873 rmb();
1874 }
1875 }
1876 bp->rx_cons = sw_cons;
1877 bp->rx_prod = sw_prod;
1878
1879 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1880
1881 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1882
1883 mmiowb();
1884
1885 return rx_pkt;
1886
1887 }
1888
1889 /* MSI ISR - The only difference between this and the INTx ISR
1890 * is that the MSI interrupt is always serviced.
1891 */
1892 static irqreturn_t
1893 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1894 {
1895 struct net_device *dev = dev_instance;
1896 struct bnx2 *bp = netdev_priv(dev);
1897
1898 prefetch(bp->status_blk);
1899 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1900 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1901 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1902
1903 /* Return here if interrupt is disabled. */
1904 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1905 return IRQ_HANDLED;
1906
1907 netif_rx_schedule(dev);
1908
1909 return IRQ_HANDLED;
1910 }
1911
1912 static irqreturn_t
1913 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1914 {
1915 struct net_device *dev = dev_instance;
1916 struct bnx2 *bp = netdev_priv(dev);
1917
1918 /* When using INTx, it is possible for the interrupt to arrive
1919 * at the CPU before the status block posted prior to the
1920 * interrupt. Reading a register will flush the status block.
1921 * When using MSI, the MSI message will always complete after
1922 * the status block write.
1923 */
1924 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1925 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1926 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1927 return IRQ_NONE;
1928
1929 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1930 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1931 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1932
1933 /* Return here if interrupt is shared and is disabled. */
1934 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1935 return IRQ_HANDLED;
1936
1937 netif_rx_schedule(dev);
1938
1939 return IRQ_HANDLED;
1940 }
1941
1942 static inline int
1943 bnx2_has_work(struct bnx2 *bp)
1944 {
1945 struct status_block *sblk = bp->status_blk;
1946
1947 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1948 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1949 return 1;
1950
1951 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1952 bp->link_up)
1953 return 1;
1954
1955 return 0;
1956 }
1957
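/* NAPI poll: service link attentions first, then tx and rx work;
 * interrupts are only re-enabled (one ack write for MSI, masked ack
 * plus unmask for INTx) once the status block shows no more work.
 */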
1958 static int
1959 bnx2_poll(struct net_device *dev, int *budget)
1960 {
1961 struct bnx2 *bp = netdev_priv(dev);
1962
1963 if ((bp->status_blk->status_attn_bits &
1964 STATUS_ATTN_BITS_LINK_STATE) !=
1965 (bp->status_blk->status_attn_bits_ack &
1966 STATUS_ATTN_BITS_LINK_STATE)) {
1967
1968 spin_lock(&bp->phy_lock);
1969 bnx2_phy_int(bp);
1970 spin_unlock(&bp->phy_lock);
1971
1972 /* This is needed to take care of transient status
1973 * during link changes.
1974 */
1975 REG_WR(bp, BNX2_HC_COMMAND,
1976 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1977 REG_RD(bp, BNX2_HC_COMMAND);
1978 }
1979
1980 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1981 bnx2_tx_int(bp);
1982
1983 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1984 int orig_budget = *budget;
1985 int work_done;
1986
1987 if (orig_budget > dev->quota)
1988 orig_budget = dev->quota;
1989
1990 work_done = bnx2_rx_int(bp, orig_budget);
1991 *budget -= work_done;
1992 dev->quota -= work_done;
1993 }
1994
1995 bp->last_status_idx = bp->status_blk->status_idx;
1996 rmb();
1997
1998 if (!bnx2_has_work(bp)) {
1999 netif_rx_complete(dev);
2000 if (likely(bp->flags & USING_MSI_FLAG)) {
2001 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2002 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2003 bp->last_status_idx);
2004 return 0;
2005 }
2006 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2007 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2008 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2009 bp->last_status_idx);
2010
2011 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2012 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2013 bp->last_status_idx);
2014 return 0;
2015 }
2016
2017 return 1;
2018 }
2019
2020 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2021 * from set_multicast.
2022 */
2023 static void
2024 bnx2_set_rx_mode(struct net_device *dev)
2025 {
2026 struct bnx2 *bp = netdev_priv(dev);
2027 u32 rx_mode, sort_mode;
2028 int i;
2029
2030 spin_lock_bh(&bp->phy_lock);
2031
2032 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2033 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2034 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2035 #ifdef BCM_VLAN
2036 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2037 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2038 #else
2039 if (!(bp->flags & ASF_ENABLE_FLAG))
2040 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2041 #endif
2042 if (dev->flags & IFF_PROMISC) {
2043 /* Promiscuous mode. */
2044 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2045 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2046 }
2047 else if (dev->flags & IFF_ALLMULTI) {
2048 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2049 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2050 0xffffffff);
2051 }
2052 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2053 }
2054 else {
2055 /* Accept one or more multicast(s). */
2056 struct dev_mc_list *mclist;
2057 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2058 u32 regidx;
2059 u32 bit;
2060 u32 crc;
2061
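/* 256-bit multicast hash filter: the low byte of the little-endian
 * CRC selects one of 256 bits across the eight 32-bit hash registers
 * (bits 7:5 pick the register, bits 4:0 the bit within it).
 */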
2062 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2063
2064 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2065 i++, mclist = mclist->next) {
2066
2067 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2068 bit = crc & 0xff;
2069 regidx = (bit & 0xe0) >> 5;
2070 bit &= 0x1f;
2071 mc_filter[regidx] |= (1 << bit);
2072 }
2073
2074 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2075 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2076 mc_filter[i]);
2077 }
2078
2079 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2080 }
2081
2082 if (rx_mode != bp->rx_mode) {
2083 bp->rx_mode = rx_mode;
2084 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2085 }
2086
2087 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2088 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2089 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2090
2091 spin_unlock_bh(&bp->phy_lock);
2092 }
2093
2094 #define FW_BUF_SIZE 0x8000
2095
2096 static int
2097 bnx2_gunzip_init(struct bnx2 *bp)
2098 {
2099 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2100 goto gunzip_nomem1;
2101
2102 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2103 goto gunzip_nomem2;
2104
2105 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2106 if (bp->strm->workspace == NULL)
2107 goto gunzip_nomem3;
2108
2109 return 0;
2110
2111 gunzip_nomem3:
2112 kfree(bp->strm);
2113 bp->strm = NULL;
2114
2115 gunzip_nomem2:
2116 vfree(bp->gunzip_buf);
2117 bp->gunzip_buf = NULL;
2118
2119 gunzip_nomem1:
2120 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2121 "decompression.\n", bp->dev->name);
2122 return -ENOMEM;
2123 }
2124
2125 static void
2126 bnx2_gunzip_end(struct bnx2 *bp)
2127 {
2128 kfree(bp->strm->workspace);
2129
2130 kfree(bp->strm);
2131 bp->strm = NULL;
2132
2133 if (bp->gunzip_buf) {
2134 vfree(bp->gunzip_buf);
2135 bp->gunzip_buf = NULL;
2136 }
2137 }
2138
2139 static int
2140 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2141 {
2142 int n, rc;
2143
2144 /* check gzip header */
2145 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2146 return -EINVAL;
2147
2148 n = 10;
2149
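/* A gzip member begins with a 10-byte fixed header: the 0x1f 0x8b
 * magic, the compression method (8 = deflate), a flag byte, mtime,
 * XFL and OS bytes.  If the FNAME flag is set, a NUL-terminated
 * original file name follows and must be skipped as well.
 */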
2150 #define FNAME 0x8
2151 if (zbuf[3] & FNAME)
2152 while ((zbuf[n++] != 0) && (n < len));
2153
2154 bp->strm->next_in = zbuf + n;
2155 bp->strm->avail_in = len - n;
2156 bp->strm->next_out = bp->gunzip_buf;
2157 bp->strm->avail_out = FW_BUF_SIZE;
2158
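/* A negative windowBits value tells zlib to expect a raw deflate
 * stream with no zlib header or trailer, which is what remains once
 * the gzip header has been skipped above.
 */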
2159 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2160 if (rc != Z_OK)
2161 return rc;
2162
2163 rc = zlib_inflate(bp->strm, Z_FINISH);
2164
2165 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2166 *outbuf = bp->gunzip_buf;
2167
2168 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2169 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2170 bp->dev->name, bp->strm->msg);
2171
2172 zlib_inflateEnd(bp->strm);
2173
2174 if (rc == Z_STREAM_END)
2175 return 0;
2176
2177 return rc;
2178 }
2179
2180 static void
2181 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2182 u32 rv2p_proc)
2183 {
2184 int i;
2185 u32 val;
2186
2187
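/* Each RV2P instruction is 64 bits wide: write it to the chip as a
 * high/low pair of 32-bit registers, then commit it to the selected
 * processor's instruction memory at word index i / 8.
 */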
2188 for (i = 0; i < rv2p_code_len; i += 8) {
2189 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2190 rv2p_code++;
2191 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2192 rv2p_code++;
2193
2194 if (rv2p_proc == RV2P_PROC1) {
2195 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2196 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2197 }
2198 else {
2199 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2200 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2201 }
2202 }
2203
2204 /* Reset the processor, un-stall is done later. */
2205 if (rv2p_proc == RV2P_PROC1) {
2206 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2207 }
2208 else {
2209 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2210 }
2211 }
2212
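/* Load one firmware image into an on-chip RISC processor: halt the CPU,
 * copy each section (text, data, sbss, bss, rodata) into its scratchpad
 * through the indirect register window, point the program counter at the
 * entry address, and finally clear the halt bit to start execution.
 */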
2213 static void
2214 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2215 {
2216 u32 offset;
2217 u32 val;
2218
2219 /* Halt the CPU. */
2220 val = REG_RD_IND(bp, cpu_reg->mode);
2221 val |= cpu_reg->mode_value_halt;
2222 REG_WR_IND(bp, cpu_reg->mode, val);
2223 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2224
2225 /* Load the Text area. */
2226 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2227 if (fw->text) {
2228 int j;
2229
2230 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2231 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2232 }
2233 }
2234
2235 /* Load the Data area. */
2236 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2237 if (fw->data) {
2238 int j;
2239
2240 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2241 REG_WR_IND(bp, offset, fw->data[j]);
2242 }
2243 }
2244
2245 /* Load the SBSS area. */
2246 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2247 if (fw->sbss) {
2248 int j;
2249
2250 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2251 REG_WR_IND(bp, offset, fw->sbss[j]);
2252 }
2253 }
2254
2255 /* Load the BSS area. */
2256 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2257 if (fw->bss) {
2258 int j;
2259
2260 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2261 REG_WR_IND(bp, offset, fw->bss[j]);
2262 }
2263 }
2264
2265 /* Load the Read-Only area. */
2266 offset = cpu_reg->spad_base +
2267 (fw->rodata_addr - cpu_reg->mips_view_base);
2268 if (fw->rodata) {
2269 int j;
2270
2271 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2272 REG_WR_IND(bp, offset, fw->rodata[j]);
2273 }
2274 }
2275
2276 /* Clear the pre-fetch instruction. */
2277 REG_WR_IND(bp, cpu_reg->inst, 0);
2278 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2279
2280 /* Start the CPU. */
2281 val = REG_RD_IND(bp, cpu_reg->mode);
2282 val &= ~cpu_reg->mode_value_halt;
2283 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2284 REG_WR_IND(bp, cpu_reg->mode, val);
2285 }
2286
2287 static int
2288 bnx2_init_cpus(struct bnx2 *bp)
2289 {
2290 struct cpu_reg cpu_reg;
2291 struct fw_info fw;
2292 int rc = 0;
2293 void *text;
2294 u32 text_len;
2295
2296 if ((rc = bnx2_gunzip_init(bp)) != 0)
2297 return rc;
2298
2299 /* Initialize the RV2P processor. */
2300 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2301 &text_len);
2302 if (rc)
2303 goto init_cpu_err;
2304
2305 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2306
2307 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2308 &text_len);
2309 if (rc)
2310 goto init_cpu_err;
2311
2312 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2313
2314 /* Initialize the RX Processor. */
2315 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2316 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2317 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2318 cpu_reg.state = BNX2_RXP_CPU_STATE;
2319 cpu_reg.state_value_clear = 0xffffff;
2320 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2321 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2322 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2323 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2324 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2325 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2326 cpu_reg.mips_view_base = 0x8000000;
2327
2328 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2329 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2330 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2331 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2332
2333 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2334 fw.text_len = bnx2_RXP_b06FwTextLen;
2335 fw.text_index = 0;
2336
2337 rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2338 &text, &text_len);
2339 if (rc)
2340 goto init_cpu_err;
2341
2342 fw.text = text;
2343
2344 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2345 fw.data_len = bnx2_RXP_b06FwDataLen;
2346 fw.data_index = 0;
2347 fw.data = bnx2_RXP_b06FwData;
2348
2349 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2350 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2351 fw.sbss_index = 0;
2352 fw.sbss = bnx2_RXP_b06FwSbss;
2353
2354 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2355 fw.bss_len = bnx2_RXP_b06FwBssLen;
2356 fw.bss_index = 0;
2357 fw.bss = bnx2_RXP_b06FwBss;
2358
2359 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2360 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2361 fw.rodata_index = 0;
2362 fw.rodata = bnx2_RXP_b06FwRodata;
2363
2364 load_cpu_fw(bp, &cpu_reg, &fw);
2365
2366 /* Initialize the TX Processor. */
2367 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2368 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2369 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2370 cpu_reg.state = BNX2_TXP_CPU_STATE;
2371 cpu_reg.state_value_clear = 0xffffff;
2372 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2373 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2374 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2375 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2376 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2377 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2378 cpu_reg.mips_view_base = 0x8000000;
2379
2380 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2381 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2382 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2383 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2384
2385 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2386 fw.text_len = bnx2_TXP_b06FwTextLen;
2387 fw.text_index = 0;
2388
2389 rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2390 &text, &text_len);
2391 if (rc)
2392 goto init_cpu_err;
2393
2394 fw.text = text;
2395
2396 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2397 fw.data_len = bnx2_TXP_b06FwDataLen;
2398 fw.data_index = 0;
2399 fw.data = bnx2_TXP_b06FwData;
2400
2401 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2402 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2403 fw.sbss_index = 0;
2404 fw.sbss = bnx2_TXP_b06FwSbss;
2405
2406 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2407 fw.bss_len = bnx2_TXP_b06FwBssLen;
2408 fw.bss_index = 0;
2409 fw.bss = bnx2_TXP_b06FwBss;
2410
2411 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2412 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2413 fw.rodata_index = 0;
2414 fw.rodata = bnx2_TXP_b06FwRodata;
2415
2416 load_cpu_fw(bp, &cpu_reg, &fw);
2417
2418 /* Initialize the TX Patch-up Processor. */
2419 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2420 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2421 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2422 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2423 cpu_reg.state_value_clear = 0xffffff;
2424 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2425 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2426 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2427 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2428 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2429 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2430 cpu_reg.mips_view_base = 0x8000000;
2431
2432 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2433 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2434 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2435 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2436
2437 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2438 fw.text_len = bnx2_TPAT_b06FwTextLen;
2439 fw.text_index = 0;
2440
2441 rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2442 &text, &text_len);
2443 if (rc)
2444 goto init_cpu_err;
2445
2446 fw.text = text;
2447
2448 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2449 fw.data_len = bnx2_TPAT_b06FwDataLen;
2450 fw.data_index = 0;
2451 fw.data = bnx2_TPAT_b06FwData;
2452
2453 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2454 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2455 fw.sbss_index = 0;
2456 fw.sbss = bnx2_TPAT_b06FwSbss;
2457
2458 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2459 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2460 fw.bss_index = 0;
2461 fw.bss = bnx2_TPAT_b06FwBss;
2462
2463 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2464 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2465 fw.rodata_index = 0;
2466 fw.rodata = bnx2_TPAT_b06FwRodata;
2467
2468 load_cpu_fw(bp, &cpu_reg, &fw);
2469
2470 /* Initialize the Completion Processor. */
2471 cpu_reg.mode = BNX2_COM_CPU_MODE;
2472 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2473 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2474 cpu_reg.state = BNX2_COM_CPU_STATE;
2475 cpu_reg.state_value_clear = 0xffffff;
2476 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2477 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2478 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2479 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2480 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2481 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2482 cpu_reg.mips_view_base = 0x8000000;
2483
2484 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2485 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2486 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2487 fw.start_addr = bnx2_COM_b06FwStartAddr;
2488
2489 fw.text_addr = bnx2_COM_b06FwTextAddr;
2490 fw.text_len = bnx2_COM_b06FwTextLen;
2491 fw.text_index = 0;
2492
2493 rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2494 &text, &text_len);
2495 if (rc)
2496 goto init_cpu_err;
2497
2498 fw.text = text;
2499
2500 fw.data_addr = bnx2_COM_b06FwDataAddr;
2501 fw.data_len = bnx2_COM_b06FwDataLen;
2502 fw.data_index = 0;
2503 fw.data = bnx2_COM_b06FwData;
2504
2505 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2506 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2507 fw.sbss_index = 0;
2508 fw.sbss = bnx2_COM_b06FwSbss;
2509
2510 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2511 fw.bss_len = bnx2_COM_b06FwBssLen;
2512 fw.bss_index = 0;
2513 fw.bss = bnx2_COM_b06FwBss;
2514
2515 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2516 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2517 fw.rodata_index = 0;
2518 fw.rodata = bnx2_COM_b06FwRodata;
2519
2520 load_cpu_fw(bp, &cpu_reg, &fw);
2521
2522 init_cpu_err:
2523 bnx2_gunzip_end(bp);
2524 return rc;
2525 }
2526
2527 static int
2528 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2529 {
2530 u16 pmcsr;
2531
2532 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2533
2534 switch (state) {
2535 case PCI_D0: {
2536 u32 val;
2537
2538 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2539 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2540 PCI_PM_CTRL_PME_STATUS);
2541
2542 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2543 /* delay required during transition out of D3hot */
2544 msleep(20);
2545
2546 val = REG_RD(bp, BNX2_EMAC_MODE);
2547 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2548 val &= ~BNX2_EMAC_MODE_MPKT;
2549 REG_WR(bp, BNX2_EMAC_MODE, val);
2550
2551 val = REG_RD(bp, BNX2_RPM_CONFIG);
2552 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2553 REG_WR(bp, BNX2_RPM_CONFIG, val);
2554 break;
2555 }
2556 case PCI_D3hot: {
2557 int i;
2558 u32 val, wol_msg;
2559
2560 if (bp->wol) {
2561 u32 advertising;
2562 u8 autoneg;
2563
2564 autoneg = bp->autoneg;
2565 advertising = bp->advertising;
2566
2567 bp->autoneg = AUTONEG_SPEED;
2568 bp->advertising = ADVERTISED_10baseT_Half |
2569 ADVERTISED_10baseT_Full |
2570 ADVERTISED_100baseT_Half |
2571 ADVERTISED_100baseT_Full |
2572 ADVERTISED_Autoneg;
2573
2574 bnx2_setup_copper_phy(bp);
2575
2576 bp->autoneg = autoneg;
2577 bp->advertising = advertising;
2578
2579 bnx2_set_mac_addr(bp);
2580
2581 val = REG_RD(bp, BNX2_EMAC_MODE);
2582
2583 /* Enable port mode. */
2584 val &= ~BNX2_EMAC_MODE_PORT;
2585 val |= BNX2_EMAC_MODE_PORT_MII |
2586 BNX2_EMAC_MODE_MPKT_RCVD |
2587 BNX2_EMAC_MODE_ACPI_RCVD |
2588 BNX2_EMAC_MODE_MPKT;
2589
2590 REG_WR(bp, BNX2_EMAC_MODE, val);
2591
2592 /* receive all multicast */
2593 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2594 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2595 0xffffffff);
2596 }
2597 REG_WR(bp, BNX2_EMAC_RX_MODE,
2598 BNX2_EMAC_RX_MODE_SORT_MODE);
2599
2600 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2601 BNX2_RPM_SORT_USER0_MC_EN;
2602 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2603 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2604 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2605 BNX2_RPM_SORT_USER0_ENA);
2606
2607 /* Need to enable EMAC and RPM for WOL. */
2608 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2609 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2610 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2611 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2612
2613 val = REG_RD(bp, BNX2_RPM_CONFIG);
2614 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2615 REG_WR(bp, BNX2_RPM_CONFIG, val);
2616
2617 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2618 }
2619 else {
2620 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2621 }
2622
2623 if (!(bp->flags & NO_WOL_FLAG))
2624 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2625
2626 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2627 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2628 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2629
2630 if (bp->wol)
2631 pmcsr |= 3;
2632 }
2633 else {
2634 pmcsr |= 3;
2635 }
2636 if (bp->wol) {
2637 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2638 }
2639 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2640 pmcsr);
2641
2642 /* No more memory access after this point until
2643 * device is brought back to D0.
2644 */
2645 udelay(50);
2646 break;
2647 }
2648 default:
2649 return -EINVAL;
2650 }
2651 return 0;
2652 }
2653
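/* The NVRAM arbiter serializes access to the flash interface between
 * the driver and whatever other agents share it (the arbiter exposes
 * several request/grant pairs; pair 2 belongs to the driver).  Each
 * poll loop below gives up after NVRAM_TIMEOUT_COUNT probes spaced
 * 5 us apart.
 */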
2654 static int
2655 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2656 {
2657 u32 val;
2658 int j;
2659
2660 /* Request access to the flash interface. */
2661 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2662 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2663 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2664 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2665 break;
2666
2667 udelay(5);
2668 }
2669
2670 if (j >= NVRAM_TIMEOUT_COUNT)
2671 return -EBUSY;
2672
2673 return 0;
2674 }
2675
2676 static int
2677 bnx2_release_nvram_lock(struct bnx2 *bp)
2678 {
2679 int j;
2680 u32 val;
2681
2682 /* Relinquish nvram interface. */
2683 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2684
2685 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2686 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2687 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2688 break;
2689
2690 udelay(5);
2691 }
2692
2693 if (j >= NVRAM_TIMEOUT_COUNT)
2694 return -EBUSY;
2695
2696 return 0;
2697 }
2698
2699
2700 static int
2701 bnx2_enable_nvram_write(struct bnx2 *bp)
2702 {
2703 u32 val;
2704
2705 val = REG_RD(bp, BNX2_MISC_CFG);
2706 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2707
2708 if (!bp->flash_info->buffered) {
2709 int j;
2710
2711 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2712 REG_WR(bp, BNX2_NVM_COMMAND,
2713 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2714
2715 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2716 udelay(5);
2717
2718 val = REG_RD(bp, BNX2_NVM_COMMAND);
2719 if (val & BNX2_NVM_COMMAND_DONE)
2720 break;
2721 }
2722
2723 if (j >= NVRAM_TIMEOUT_COUNT)
2724 return -EBUSY;
2725 }
2726 return 0;
2727 }
2728
2729 static void
2730 bnx2_disable_nvram_write(struct bnx2 *bp)
2731 {
2732 u32 val;
2733
2734 val = REG_RD(bp, BNX2_MISC_CFG);
2735 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2736 }
2737
2738
2739 static void
2740 bnx2_enable_nvram_access(struct bnx2 *bp)
2741 {
2742 u32 val;
2743
2744 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2745 /* Enable both bits, even on read. */
2746 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2747 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2748 }
2749
2750 static void
2751 bnx2_disable_nvram_access(struct bnx2 *bp)
2752 {
2753 u32 val;
2754
2755 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2756 /* Disable both bits, even after read. */
2757 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2758 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2759 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2760 }
2761
2762 static int
2763 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2764 {
2765 u32 cmd;
2766 int j;
2767
2768 if (bp->flash_info->buffered)
2769 /* Buffered flash, no erase needed */
2770 return 0;
2771
2772 /* Build an erase command */
2773 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2774 BNX2_NVM_COMMAND_DOIT;
2775
2776 /* Need to clear DONE bit separately. */
2777 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2778
2779 /* Address of the NVRAM page to erase. */
2780 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2781
2782 /* Issue an erase command. */
2783 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2784
2785 /* Wait for completion. */
2786 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2787 u32 val;
2788
2789 udelay(5);
2790
2791 val = REG_RD(bp, BNX2_NVM_COMMAND);
2792 if (val & BNX2_NVM_COMMAND_DONE)
2793 break;
2794 }
2795
2796 if (j >= NVRAM_TIMEOUT_COUNT)
2797 return -EBUSY;
2798
2799 return 0;
2800 }
2801
2802 static int
2803 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2804 {
2805 u32 cmd;
2806 int j;
2807
2808 /* Build the command word. */
2809 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2810
2811 /* Convert the linear offset to the page-based addressing used by buffered flash. */
2812 if (bp->flash_info->buffered) {
2813 offset = ((offset / bp->flash_info->page_size) <<
2814 bp->flash_info->page_bits) +
2815 (offset % bp->flash_info->page_size);
2816 }
2817
2818 /* Need to clear DONE bit separately. */
2819 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2820
2821 /* Address of the NVRAM to read from. */
2822 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2823
2824 /* Issue a read command. */
2825 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2826
2827 /* Wait for completion. */
2828 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2829 u32 val;
2830
2831 udelay(5);
2832
2833 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834 if (val & BNX2_NVM_COMMAND_DONE) {
2835 val = REG_RD(bp, BNX2_NVM_READ);
2836
2837 val = be32_to_cpu(val);
2838 memcpy(ret_val, &val, 4);
2839 break;
2840 }
2841 }
2842 if (j >= NVRAM_TIMEOUT_COUNT)
2843 return -EBUSY;
2844
2845 return 0;
2846 }
2847
2848
2849 static int
2850 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2851 {
2852 u32 cmd, val32;
2853 int j;
2854
2855 /* Build the command word. */
2856 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2857
2858 /* Convert the linear offset to the page-based addressing used by buffered flash. */
2859 if (bp->flash_info->buffered) {
2860 offset = ((offset / bp->flash_info->page_size) <<
2861 bp->flash_info->page_bits) +
2862 (offset % bp->flash_info->page_size);
2863 }
2864
2865 /* Need to clear DONE bit separately. */
2866 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2867
2868 memcpy(&val32, val, 4);
2869 val32 = cpu_to_be32(val32);
2870
2871 /* Write the data. */
2872 REG_WR(bp, BNX2_NVM_WRITE, val32);
2873
2874 /* Address of the NVRAM to write to. */
2875 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2876
2877 /* Issue the write command. */
2878 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2879
2880 /* Wait for completion. */
2881 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2882 udelay(5);
2883
2884 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2885 break;
2886 }
2887 if (j >= NVRAM_TIMEOUT_COUNT)
2888 return -EBUSY;
2889
2890 return 0;
2891 }
2892
2893 static int
2894 bnx2_init_nvram(struct bnx2 *bp)
2895 {
2896 u32 val;
2897 int j, entry_count, rc;
2898 struct flash_spec *flash;
2899
2900 /* Determine the selected interface. */
2901 val = REG_RD(bp, BNX2_NVM_CFG1);
2902
2903 entry_count = ARRAY_SIZE(flash_table);
2904
2905 rc = 0;
2906 if (val & 0x40000000) {
2907
2908 /* Flash interface has been reconfigured */
2909 for (j = 0, flash = &flash_table[0]; j < entry_count;
2910 j++, flash++) {
2911 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2912 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2913 bp->flash_info = flash;
2914 break;
2915 }
2916 }
2917 }
2918 else {
2919 u32 mask;
2920 /* Not yet reconfigured */
2921
2922 if (val & (1 << 23))
2923 mask = FLASH_BACKUP_STRAP_MASK;
2924 else
2925 mask = FLASH_STRAP_MASK;
2926
2927 for (j = 0, flash = &flash_table[0]; j < entry_count;
2928 j++, flash++) {
2929
2930 if ((val & mask) == (flash->strapping & mask)) {
2931 bp->flash_info = flash;
2932
2933 /* Request access to the flash interface. */
2934 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2935 return rc;
2936
2937 /* Enable access to flash interface */
2938 bnx2_enable_nvram_access(bp);
2939
2940 /* Reconfigure the flash interface */
2941 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2942 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2943 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2944 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2945
2946 /* Disable access to flash interface */
2947 bnx2_disable_nvram_access(bp);
2948 bnx2_release_nvram_lock(bp);
2949
2950 break;
2951 }
2952 }
2953 } /* if (val & 0x40000000) */
2954
2955 if (j == entry_count) {
2956 bp->flash_info = NULL;
2957 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2958 return -ENODEV;
2959 }
2960
2961 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2962 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2963 if (val)
2964 bp->flash_size = val;
2965 else
2966 bp->flash_size = bp->flash_info->total_size;
2967
2968 return rc;
2969 }
2970
2971 static int
2972 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2973 int buf_size)
2974 {
2975 int rc = 0;
2976 u32 cmd_flags, offset32, len32, extra;
2977
2978 if (buf_size == 0)
2979 return 0;
2980
2981 /* Request access to the flash interface. */
2982 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2983 return rc;
2984
2985 /* Enable access to flash interface */
2986 bnx2_enable_nvram_access(bp);
2987
2988 len32 = buf_size;
2989 offset32 = offset;
2990 extra = 0;
2991
2992 cmd_flags = 0;
2993
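/* The NVRAM interface transfers whole 32-bit words, and a multi-word
 * access must be bracketed with the FIRST and LAST command flags.
 * A misaligned start and a ragged end are handled by reading whole
 * dwords into a small bounce buffer and copying out only the bytes
 * that were actually requested.
 */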
2994 if (offset32 & 3) {
2995 u8 buf[4];
2996 u32 pre_len;
2997
2998 offset32 &= ~3;
2999 pre_len = 4 - (offset & 3);
3000
3001 if (pre_len >= len32) {
3002 pre_len = len32;
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3004 BNX2_NVM_COMMAND_LAST;
3005 }
3006 else {
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3008 }
3009
3010 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3011
3012 if (rc)
3013 return rc;
3014
3015 memcpy(ret_buf, buf + (offset & 3), pre_len);
3016
3017 offset32 += 4;
3018 ret_buf += pre_len;
3019 len32 -= pre_len;
3020 }
3021 if (len32 & 3) {
3022 extra = 4 - (len32 & 3);
3023 len32 = (len32 + 4) & ~3;
3024 }
3025
3026 if (len32 == 4) {
3027 u8 buf[4];
3028
3029 if (cmd_flags)
3030 cmd_flags = BNX2_NVM_COMMAND_LAST;
3031 else
3032 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3033 BNX2_NVM_COMMAND_LAST;
3034
3035 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3036
3037 memcpy(ret_buf, buf, 4 - extra);
3038 }
3039 else if (len32 > 0) {
3040 u8 buf[4];
3041
3042 /* Read the first word. */
3043 if (cmd_flags)
3044 cmd_flags = 0;
3045 else
3046 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3047
3048 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3049
3050 /* Advance to the next dword. */
3051 offset32 += 4;
3052 ret_buf += 4;
3053 len32 -= 4;
3054
3055 while (len32 > 4 && rc == 0) {
3056 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3057
3058 /* Advance to the next dword. */
3059 offset32 += 4;
3060 ret_buf += 4;
3061 len32 -= 4;
3062 }
3063
3064 if (rc)
3065 return rc;
3066
3067 cmd_flags = BNX2_NVM_COMMAND_LAST;
3068 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3069
3070 memcpy(ret_buf, buf, 4 - extra);
3071 }
3072
3073 /* Disable access to flash interface */
3074 bnx2_disable_nvram_access(bp);
3075
3076 bnx2_release_nvram_lock(bp);
3077
3078 return rc;
3079 }
3080
3081 static int
3082 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3083 int buf_size)
3084 {
3085 u32 written, offset32, len32;
3086 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3087 int rc = 0;
3088 int align_start, align_end;
3089
3090 buf = data_buf;
3091 offset32 = offset;
3092 len32 = buf_size;
3093 align_start = align_end = 0;
3094
3095 if ((align_start = (offset32 & 3))) {
3096 offset32 &= ~3;
3097 len32 += align_start;
3098 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3099 return rc;
3100 }
3101
3102 if (len32 & 3) {
3103 if ((len32 > 4) || !align_start) {
3104 align_end = 4 - (len32 & 3);
3105 len32 += align_end;
3106 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3107 end, 4))) {
3108 return rc;
3109 }
3110 }
3111 }
3112
3113 if (align_start || align_end) {
3114 buf = kmalloc(len32, GFP_KERNEL);
3115 if (buf == NULL)
3116 return -ENOMEM;
3117 if (align_start) {
3118 memcpy(buf, start, 4);
3119 }
3120 if (align_end) {
3121 memcpy(buf + len32 - 4, end, 4);
3122 }
3123 memcpy(buf + align_start, data_buf, buf_size);
3124 }
3125
3126 if (bp->flash_info->buffered == 0) {
3127 flash_buffer = kmalloc(264, GFP_KERNEL);
3128 if (flash_buffer == NULL) {
3129 rc = -ENOMEM;
3130 goto nvram_write_end;
3131 }
3132 }
3133
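/* Write page by page.  Non-buffered parts need a read-modify-write
 * cycle: read the whole page into flash_buffer, erase the page, then
 * write back the preserved head, the new data, and the preserved
 * tail.
 */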
3134 written = 0;
3135 while ((written < len32) && (rc == 0)) {
3136 u32 page_start, page_end, data_start, data_end;
3137 u32 addr, cmd_flags;
3138 int i;
3139
3140 /* Find the page_start addr */
3141 page_start = offset32 + written;
3142 page_start -= (page_start % bp->flash_info->page_size);
3143 /* Find the page_end addr */
3144 page_end = page_start + bp->flash_info->page_size;
3145 /* Find the data_start addr */
3146 data_start = (written == 0) ? offset32 : page_start;
3147 /* Find the data_end addr */
3148 data_end = (page_end > offset32 + len32) ?
3149 (offset32 + len32) : page_end;
3150
3151 /* Request access to the flash interface. */
3152 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3153 goto nvram_write_end;
3154
3155 /* Enable access to flash interface */
3156 bnx2_enable_nvram_access(bp);
3157
3158 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3159 if (bp->flash_info->buffered == 0) {
3160 int j;
3161
3162 /* Read the whole page into the buffer
3163 * (non-buffered flash only) */
3164 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3165 if (j == (bp->flash_info->page_size - 4)) {
3166 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3167 }
3168 rc = bnx2_nvram_read_dword(bp,
3169 page_start + j,
3170 &flash_buffer[j],
3171 cmd_flags);
3172
3173 if (rc)
3174 goto nvram_write_end;
3175
3176 cmd_flags = 0;
3177 }
3178 }
3179
3180 /* Enable writes to flash interface (unlock write-protect) */
3181 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3182 goto nvram_write_end;
3183
3184 /* Erase the page */
3185 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3186 goto nvram_write_end;
3187
3188 /* Re-enable the write again for the actual write */
3189 bnx2_enable_nvram_write(bp);
3190
3191 /* Loop to write back the buffer data from page_start to
3192 * data_start */
3193 i = 0;
3194 if (bp->flash_info->buffered == 0) {
3195 for (addr = page_start; addr < data_start;
3196 addr += 4, i += 4) {
3197
3198 rc = bnx2_nvram_write_dword(bp, addr,
3199 &flash_buffer[i], cmd_flags);
3200
3201 if (rc != 0)
3202 goto nvram_write_end;
3203
3204 cmd_flags = 0;
3205 }
3206 }
3207
3208 /* Loop to write the new data from data_start to data_end */
3209 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3210 if ((addr == page_end - 4) ||
3211 ((bp->flash_info->buffered) &&
3212 (addr == data_end - 4))) {
3213
3214 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3215 }
3216 rc = bnx2_nvram_write_dword(bp, addr, buf,
3217 cmd_flags);
3218
3219 if (rc != 0)
3220 goto nvram_write_end;
3221
3222 cmd_flags = 0;
3223 buf += 4;
3224 }
3225
3226 /* Loop to write back the buffer data from data_end
3227 * to page_end */
3228 if (bp->flash_info->buffered == 0) {
3229 for (addr = data_end; addr < page_end;
3230 addr += 4, i += 4) {
3231
3232 if (addr == page_end-4) {
3233 cmd_flags = BNX2_NVM_COMMAND_LAST;
3234 }
3235 rc = bnx2_nvram_write_dword(bp, addr,
3236 &flash_buffer[i], cmd_flags);
3237
3238 if (rc != 0)
3239 goto nvram_write_end;
3240
3241 cmd_flags = 0;
3242 }
3243 }
3244
3245 /* Disable writes to flash interface (lock write-protect) */
3246 bnx2_disable_nvram_write(bp);
3247
3248 /* Disable access to flash interface */
3249 bnx2_disable_nvram_access(bp);
3250 bnx2_release_nvram_lock(bp);
3251
3252 /* Increment written */
3253 written += data_end - data_start;
3254 }
3255
3256 nvram_write_end:
3257 if (bp->flash_info->buffered == 0)
3258 kfree(flash_buffer);
3259
3260 if (align_start || align_end)
3261 kfree(buf);
3262 return rc;
3263 }
3264
3265 static int
3266 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3267 {
3268 u32 val;
3269 int i, rc = 0;
3270
3271 /* Wait for the current PCI transaction to complete before
3272 * issuing a reset. */
3273 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3274 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3276 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3277 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3278 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3279 udelay(5);
3280
3281 /* Wait for the firmware to tell us it is ok to issue a reset. */
3282 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3283
3284 /* Deposit a driver reset signature so the firmware knows that
3285 * this is a soft reset. */
3286 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3287 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3288
3289 /* Do a dummy read to force the chip to complete all current transactions
3290 * before we issue a reset. */
3291 val = REG_RD(bp, BNX2_MISC_ID);
3292
3293 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3294 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3295 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3296
3297 /* Chip reset. */
3298 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3299
3300 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3301 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3302 msleep(15);
3303
3304 /* Reset takes approximately 30 usec */
3305 for (i = 0; i < 10; i++) {
3306 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3307 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3308 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3309 break;
3310 }
3311 udelay(10);
3312 }
3313
3314 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3315 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3316 printk(KERN_ERR PFX "Chip reset did not complete\n");
3317 return -EBUSY;
3318 }
3319
3320 /* Make sure byte swapping is properly configured. */
3321 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3322 if (val != 0x01020304) {
3323 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3324 return -ENODEV;
3325 }
3326
3327 /* Wait for the firmware to finish its initialization. */
3328 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3329 if (rc)
3330 return rc;
3331
3332 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3333 /* Adjust the voltage regulator two steps lower.  The default
3334 * of this register is 0x0000000e. */
3335 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3336
3337 /* Remove bad rbuf memory from the free pool. */
3338 rc = bnx2_alloc_bad_rbuf(bp);
3339 }
3340
3341 return rc;
3342 }
3343
3344 static int
3345 bnx2_init_chip(struct bnx2 *bp)
3346 {
3347 u32 val;
3348 int rc;
3349
3350 /* Make sure the interrupt is not active. */
3351 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3352
3353 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3354 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3355 #ifdef __BIG_ENDIAN
3356 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3357 #endif
3358 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3359 DMA_READ_CHANS << 12 |
3360 DMA_WRITE_CHANS << 16;
3361
3362 val |= (0x2 << 20) | (1 << 11);
3363
3364 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3365 val |= (1 << 23);
3366
3367 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3368 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3369 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3370
3371 REG_WR(bp, BNX2_DMA_CONFIG, val);
3372
3373 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3374 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3375 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3376 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3377 }
3378
3379 if (bp->flags & PCIX_FLAG) {
3380 u16 val16;
3381
3382 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3383 &val16);
3384 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3385 val16 & ~PCI_X_CMD_ERO);
3386 }
3387
3388 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3389 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3390 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3391 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3392
3393 /* Initialize context mapping and zero out the quick contexts. The
3394 * context block must have already been enabled. */
3395 bnx2_init_context(bp);
3396
3397 if ((rc = bnx2_init_cpus(bp)) != 0)
3398 return rc;
3399
3400 bnx2_init_nvram(bp);
3401
3402 bnx2_set_mac_addr(bp);
3403
3404 val = REG_RD(bp, BNX2_MQ_CONFIG);
3405 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3406 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3407 REG_WR(bp, BNX2_MQ_CONFIG, val);
3408
3409 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3410 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3411 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3412
3413 val = (BCM_PAGE_BITS - 8) << 24;
3414 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3415
3416 /* Configure page size. */
3417 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3418 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3419 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3420 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3421
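/* Seed the EMAC transmit backoff random number generator from the
 * MAC address so that stations sharing a segment tend to choose
 * different backoff slots.
 */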
3422 val = bp->mac_addr[0] +
3423 (bp->mac_addr[1] << 8) +
3424 (bp->mac_addr[2] << 16) +
3425 bp->mac_addr[3] +
3426 (bp->mac_addr[4] << 8) +
3427 (bp->mac_addr[5] << 16);
3428 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3429
3430 /* Program the MTU. Also include 4 bytes for CRC32. */
3431 val = bp->dev->mtu + ETH_HLEN + 4;
3432 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3433 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3434 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3435
3436 bp->last_status_idx = 0;
3437 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3438
3439 /* Set up how to generate a link change interrupt. */
3440 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3441
3442 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3443 (u64) bp->status_blk_mapping & 0xffffffff);
3444 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3445
3446 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3447 (u64) bp->stats_blk_mapping & 0xffffffff);
3448 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3449 (u64) bp->stats_blk_mapping >> 32);
3450
3451 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3452 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3453
3454 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3455 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3456
3457 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3458 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3459
3460 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3461
3462 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3463
3464 REG_WR(bp, BNX2_HC_COM_TICKS,
3465 (bp->com_ticks_int << 16) | bp->com_ticks);
3466
3467 REG_WR(bp, BNX2_HC_CMD_TICKS,
3468 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3469
3470 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3471 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3472
3473 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3474 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3475 else {
3476 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3477 BNX2_HC_CONFIG_TX_TMR_MODE |
3478 BNX2_HC_CONFIG_COLLECT_STATS);
3479 }
3480
3481 /* Clear internal stats counters. */
3482 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3483
3484 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3485
3486 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3487 BNX2_PORT_FEATURE_ASF_ENABLED)
3488 bp->flags |= ASF_ENABLE_FLAG;
3489
3490 /* Initialize the receive filter. */
3491 bnx2_set_rx_mode(bp->dev);
3492
3493 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3494 0);
3495
3496 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3497 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3498
3499 udelay(20);
3500
3501 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3502
3503 return rc;
3504 }
3505
3506
3507 static void
3508 bnx2_init_tx_ring(struct bnx2 *bp)
3509 {
3510 struct tx_bd *txbd;
3511 u32 val;
3512
3513 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3514
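/* The last descriptor in the ring is a chain BD pointing back at the
 * start of the ring, turning the array into a circular chain for the
 * hardware.
 */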
3515 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3516
3517 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3518 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3519
3520 bp->tx_prod = 0;
3521 bp->tx_cons = 0;
3522 bp->hw_tx_cons = 0;
3523 bp->tx_prod_bseq = 0;
3524
3525 val = BNX2_L2CTX_TYPE_TYPE_L2;
3526 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3527 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3528
3529 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3530 val |= 8 << 16;
3531 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3532
3533 val = (u64) bp->tx_desc_mapping >> 32;
3534 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3535
3536 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3537 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3538 }
3539
3540 static void
3541 bnx2_init_rx_ring(struct bnx2 *bp)
3542 {
3543 struct rx_bd *rxbd;
3544 int i;
3545 u16 prod, ring_prod;
3546 u32 val;
3547
3548 /* 8 for CRC and VLAN */
3549 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3550 /* 8 for alignment */
3551 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3552
3553 ring_prod = prod = bp->rx_prod = 0;
3554 bp->rx_cons = 0;
3555 bp->hw_rx_cons = 0;
3556 bp->rx_prod_bseq = 0;
3557
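/* Link the RX descriptor pages into one circular chain: the final BD
 * of each page points at the next page, and the last page points
 * back to the first.
 */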
3558 for (i = 0; i < bp->rx_max_ring; i++) {
3559 int j;
3560
3561 rxbd = &bp->rx_desc_ring[i][0];
3562 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3563 rxbd->rx_bd_len = bp->rx_buf_use_size;
3564 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3565 }
3566 if (i == (bp->rx_max_ring - 1))
3567 j = 0;
3568 else
3569 j = i + 1;
3570 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3571 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3572 0xffffffff;
3573 }
3574
3575 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3576 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3577 val |= 0x02 << 8;
3578 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3579
3580 val = (u64) bp->rx_desc_mapping[0] >> 32;
3581 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3582
3583 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3584 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3585
3586 for (i = 0; i < bp->rx_ring_size; i++) {
3587 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3588 break;
3589 }
3590 prod = NEXT_RX_BD(prod);
3591 ring_prod = RX_RING_IDX(prod);
3592 }
3593 bp->rx_prod = prod;
3594
3595 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3596
3597 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3598 }
3599
3600 static void
3601 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3602 {
3603 u32 num_rings, max;
3604
3605 bp->rx_ring_size = size;
3606 num_rings = 1;
3607 while (size > MAX_RX_DESC_CNT) {
3608 size -= MAX_RX_DESC_CNT;
3609 num_rings++;
3610 }
3611 /* round to next power of 2 */
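/* max ends up as the highest power of two <= num_rings and is then
 * doubled if num_rings is not itself a power of two, i.e. num_rings
 * is rounded up: num_rings = 3 yields max = 4, for example.
 */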
3612 max = MAX_RX_RINGS;
3613 while ((max & num_rings) == 0)
3614 max >>= 1;
3615
3616 if (num_rings != max)
3617 max <<= 1;
3618
3619 bp->rx_max_ring = max;
3620 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3621 }
3622
3623 static void
3624 bnx2_free_tx_skbs(struct bnx2 *bp)
3625 {
3626 int i;
3627
3628 if (bp->tx_buf_ring == NULL)
3629 return;
3630
3631 for (i = 0; i < TX_DESC_CNT; ) {
3632 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3633 struct sk_buff *skb = tx_buf->skb;
3634 int j, last;
3635
3636 if (skb == NULL) {
3637 i++;
3638 continue;
3639 }
3640
3641 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3642 skb_headlen(skb), PCI_DMA_TODEVICE);
3643
3644 tx_buf->skb = NULL;
3645
3646 last = skb_shinfo(skb)->nr_frags;
3647 for (j = 0; j < last; j++) {
3648 tx_buf = &bp->tx_buf_ring[i + j + 1];
3649 pci_unmap_page(bp->pdev,
3650 pci_unmap_addr(tx_buf, mapping),
3651 skb_shinfo(skb)->frags[j].size,
3652 PCI_DMA_TODEVICE);
3653 }
3654 dev_kfree_skb(skb);
3655 i += j + 1;
3656 }
3657
3658 }
3659
3660 static void
3661 bnx2_free_rx_skbs(struct bnx2 *bp)
3662 {
3663 int i;
3664
3665 if (bp->rx_buf_ring == NULL)
3666 return;
3667
3668 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3669 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3670 struct sk_buff *skb = rx_buf->skb;
3671
3672 if (skb == NULL)
3673 continue;
3674
3675 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3676 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3677
3678 rx_buf->skb = NULL;
3679
3680 dev_kfree_skb(skb);
3681 }
3682 }
3683
3684 static void
3685 bnx2_free_skbs(struct bnx2 *bp)
3686 {
3687 bnx2_free_tx_skbs(bp);
3688 bnx2_free_rx_skbs(bp);
3689 }
3690
3691 static int
3692 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3693 {
3694 int rc;
3695
3696 rc = bnx2_reset_chip(bp, reset_code);
3697 bnx2_free_skbs(bp);
3698 if (rc)
3699 return rc;
3700
3701 if ((rc = bnx2_init_chip(bp)) != 0)
3702 return rc;
3703
3704 bnx2_init_tx_ring(bp);
3705 bnx2_init_rx_ring(bp);
3706 return 0;
3707 }
3708
3709 static int
3710 bnx2_init_nic(struct bnx2 *bp)
3711 {
3712 int rc;
3713
3714 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3715 return rc;
3716
3717 bnx2_init_phy(bp);
3718 bnx2_set_link(bp);
3719 return 0;
3720 }
3721
3722 static int
3723 bnx2_test_registers(struct bnx2 *bp)
3724 {
3725 int ret;
3726 int i;
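/* Each entry describes one register: rw_mask bits must follow what
 * is written, ro_mask bits must keep their original value.  The test
 * writes all-zeros and then all-ones, checks both properties after
 * each write, and always restores the saved value.
 */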
3727 static const struct {
3728 u16 offset;
3729 u16 flags;
3730 u32 rw_mask;
3731 u32 ro_mask;
3732 } reg_tbl[] = {
3733 { 0x006c, 0, 0x00000000, 0x0000003f },
3734 { 0x0090, 0, 0xffffffff, 0x00000000 },
3735 { 0x0094, 0, 0x00000000, 0x00000000 },
3736
3737 { 0x0404, 0, 0x00003f00, 0x00000000 },
3738 { 0x0418, 0, 0x00000000, 0xffffffff },
3739 { 0x041c, 0, 0x00000000, 0xffffffff },
3740 { 0x0420, 0, 0x00000000, 0x80ffffff },
3741 { 0x0424, 0, 0x00000000, 0x00000000 },
3742 { 0x0428, 0, 0x00000000, 0x00000001 },
3743 { 0x0450, 0, 0x00000000, 0x0000ffff },
3744 { 0x0454, 0, 0x00000000, 0xffffffff },
3745 { 0x0458, 0, 0x00000000, 0xffffffff },
3746
3747 { 0x0808, 0, 0x00000000, 0xffffffff },
3748 { 0x0854, 0, 0x00000000, 0xffffffff },
3749 { 0x0868, 0, 0x00000000, 0x77777777 },
3750 { 0x086c, 0, 0x00000000, 0x77777777 },
3751 { 0x0870, 0, 0x00000000, 0x77777777 },
3752 { 0x0874, 0, 0x00000000, 0x77777777 },
3753
3754 { 0x0c00, 0, 0x00000000, 0x00000001 },
3755 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3756 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3757
3758 { 0x1000, 0, 0x00000000, 0x00000001 },
3759 { 0x1004, 0, 0x00000000, 0x000f0001 },
3760
3761 { 0x1408, 0, 0x01c00800, 0x00000000 },
3762 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3763 { 0x14a8, 0, 0x00000000, 0x000001ff },
3764 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3765 { 0x14b0, 0, 0x00000002, 0x00000001 },
3766 { 0x14b8, 0, 0x00000000, 0x00000000 },
3767 { 0x14c0, 0, 0x00000000, 0x00000009 },
3768 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3769 { 0x14cc, 0, 0x00000000, 0x00000001 },
3770 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3771
3772 { 0x1800, 0, 0x00000000, 0x00000001 },
3773 { 0x1804, 0, 0x00000000, 0x00000003 },
3774
3775 { 0x2800, 0, 0x00000000, 0x00000001 },
3776 { 0x2804, 0, 0x00000000, 0x00003f01 },
3777 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3778 { 0x2810, 0, 0xffff0000, 0x00000000 },
3779 { 0x2814, 0, 0xffff0000, 0x00000000 },
3780 { 0x2818, 0, 0xffff0000, 0x00000000 },
3781 { 0x281c, 0, 0xffff0000, 0x00000000 },
3782 { 0x2834, 0, 0xffffffff, 0x00000000 },
3783 { 0x2840, 0, 0x00000000, 0xffffffff },
3784 { 0x2844, 0, 0x00000000, 0xffffffff },
3785 { 0x2848, 0, 0xffffffff, 0x00000000 },
3786 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3787
3788 { 0x2c00, 0, 0x00000000, 0x00000011 },
3789 { 0x2c04, 0, 0x00000000, 0x00030007 },
3790
3791 { 0x3c00, 0, 0x00000000, 0x00000001 },
3792 { 0x3c04, 0, 0x00000000, 0x00070000 },
3793 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3794 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3795 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3796 { 0x3c14, 0, 0x00000000, 0xffffffff },
3797 { 0x3c18, 0, 0x00000000, 0xffffffff },
3798 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3799 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3800
3801 { 0x5004, 0, 0x00000000, 0x0000007f },
3802 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3803 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3804
3805 { 0x5c00, 0, 0x00000000, 0x00000001 },
3806 { 0x5c04, 0, 0x00000000, 0x0003000f },
3807 { 0x5c08, 0, 0x00000003, 0x00000000 },
3808 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3809 { 0x5c10, 0, 0x00000000, 0xffffffff },
3810 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3811 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3812 { 0x5c88, 0, 0x00000000, 0x00077373 },
3813 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3814
3815 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3816 { 0x680c, 0, 0xffffffff, 0x00000000 },
3817 { 0x6810, 0, 0xffffffff, 0x00000000 },
3818 { 0x6814, 0, 0xffffffff, 0x00000000 },
3819 { 0x6818, 0, 0xffffffff, 0x00000000 },
3820 { 0x681c, 0, 0xffffffff, 0x00000000 },
3821 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3822 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3823 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3824 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3825 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3826 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3827 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3828 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3829 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3830 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3831 { 0x684c, 0, 0xffffffff, 0x00000000 },
3832 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3833 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3834 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3835 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3836 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3837 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3838
3839 { 0xffff, 0, 0x00000000, 0x00000000 },
3840 };
3841
3842 ret = 0;
3843 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3844 u32 offset, rw_mask, ro_mask, save_val, val;
3845
3846 offset = (u32) reg_tbl[i].offset;
3847 rw_mask = reg_tbl[i].rw_mask;
3848 ro_mask = reg_tbl[i].ro_mask;
3849
3850 save_val = readl(bp->regview + offset);
3851
3852 writel(0, bp->regview + offset);
3853
3854 val = readl(bp->regview + offset);
3855 if ((val & rw_mask) != 0) {
3856 goto reg_test_err;
3857 }
3858
3859 if ((val & ro_mask) != (save_val & ro_mask)) {
3860 goto reg_test_err;
3861 }
3862
3863 writel(0xffffffff, bp->regview + offset);
3864
3865 val = readl(bp->regview + offset);
3866 if ((val & rw_mask) != rw_mask) {
3867 goto reg_test_err;
3868 }
3869
3870 if ((val & ro_mask) != (save_val & ro_mask)) {
3871 goto reg_test_err;
3872 }
3873
3874 writel(save_val, bp->regview + offset);
3875 continue;
3876
3877 reg_test_err:
3878 writel(save_val, bp->regview + offset);
3879 ret = -ENODEV;
3880 break;
3881 }
3882 return ret;
3883 }
3884
3885 static int
3886 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3887 {
3888 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3889 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
3890 int i;
3891
3892 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
3893 u32 offset;
3894
3895 for (offset = 0; offset < size; offset += 4) {
3896
3897 REG_WR_IND(bp, start + offset, test_pattern[i]);
3898
3899 if (REG_RD_IND(bp, start + offset) !=
3900 test_pattern[i]) {
3901 return -ENODEV;
3902 }
3903 }
3904 }
3905 return 0;
3906 }
3907
3908 static int
3909 bnx2_test_memory(struct bnx2 *bp)
3910 {
3911 int ret = 0;
3912 int i;
3913 static const struct {
3914 u32 offset;
3915 u32 len;
3916 } mem_tbl[] = {
3917 { 0x60000, 0x4000 },
3918 { 0xa0000, 0x3000 },
3919 { 0xe0000, 0x4000 },
3920 { 0x120000, 0x4000 },
3921 { 0x1a0000, 0x4000 },
3922 { 0x160000, 0x4000 },
3923 { 0xffffffff, 0 },
3924 };
3925
3926 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3927 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3928 mem_tbl[i].len)) != 0) {
3929 return ret;
3930 }
3931 }
3932
3933 return ret;
3934 }
3935
3936 #define BNX2_MAC_LOOPBACK 0
3937 #define BNX2_PHY_LOOPBACK 1
3938
3939 static int
3940 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3941 {
3942 unsigned int pkt_size, num_pkts, i;
3943 struct sk_buff *skb, *rx_skb;
3944 unsigned char *packet;
3945 u16 rx_start_idx, rx_idx;
3946 dma_addr_t map;
3947 struct tx_bd *txbd;
3948 struct sw_bd *rx_buf;
3949 struct l2_fhdr *rx_hdr;
3950 int ret = -ENODEV;
3951
3952 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3953 bp->loopback = MAC_LOOPBACK;
3954 bnx2_set_mac_loopback(bp);
3955 }
3956 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3957 bp->loopback = 0;
3958 bnx2_set_phy_loopback(bp);
3959 }
3960 else
3961 return -EINVAL;
3962
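/* Build a maximum-size test frame addressed to ourselves: our own
 * MAC as destination, zeroed source/type bytes, and an index-based
 * payload pattern that can be verified once the frame has looped
 * back through the MAC or the PHY.
 */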
3963 pkt_size = 1514;
3964 skb = dev_alloc_skb(pkt_size);
3965 if (!skb)
3966 return -ENOMEM;
3967 packet = skb_put(skb, pkt_size);
3968 memcpy(packet, bp->mac_addr, 6);
3969 memset(packet + 6, 0x0, 8);
3970 for (i = 14; i < pkt_size; i++)
3971 packet[i] = (unsigned char) (i & 0xff);
3972
3973 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3974 PCI_DMA_TODEVICE);
3975
3976 REG_WR(bp, BNX2_HC_COMMAND,
3977 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3978
3979 REG_RD(bp, BNX2_HC_COMMAND);
3980
3981 udelay(5);
3982 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3983
3984 num_pkts = 0;
3985
3986 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3987
3988 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3989 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3990 txbd->tx_bd_mss_nbytes = pkt_size;
3991 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3992
3993 num_pkts++;
3994 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3995 bp->tx_prod_bseq += pkt_size;
3996
3997 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3998 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3999
4000 udelay(100);
4001
4002 REG_WR(bp, BNX2_HC_COMMAND,
4003 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4004
4005 REG_RD(bp, BNX2_HC_COMMAND);
4006
4007 udelay(5);
4008
4009 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4010 dev_kfree_skb(skb);
4011
4012 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4013 goto loopback_test_done;
4014 }
4015
4016 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4017 if (rx_idx != rx_start_idx + num_pkts) {
4018 goto loopback_test_done;
4019 }
4020
4021 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4022 rx_skb = rx_buf->skb;
4023
4024 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4025 skb_reserve(rx_skb, bp->rx_offset);
4026
4027 pci_dma_sync_single_for_cpu(bp->pdev,
4028 pci_unmap_addr(rx_buf, mapping),
4029 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4030
4031 if (rx_hdr->l2_fhdr_status &
4032 (L2_FHDR_ERRORS_BAD_CRC |
4033 L2_FHDR_ERRORS_PHY_DECODE |
4034 L2_FHDR_ERRORS_ALIGNMENT |
4035 L2_FHDR_ERRORS_TOO_SHORT |
4036 L2_FHDR_ERRORS_GIANT_FRAME)) {
4037
4038 goto loopback_test_done;
4039 }
4040
4041 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4042 goto loopback_test_done;
4043 }
4044
4045 for (i = 14; i < pkt_size; i++) {
4046 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4047 goto loopback_test_done;
4048 }
4049 }
4050
4051 ret = 0;
4052
4053 loopback_test_done:
4054 bp->loopback = 0;
4055 return ret;
4056 }
4057
4058 #define BNX2_MAC_LOOPBACK_FAILED 1
4059 #define BNX2_PHY_LOOPBACK_FAILED 2
4060 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4061 BNX2_PHY_LOOPBACK_FAILED)
4062
4063 static int
4064 bnx2_test_loopback(struct bnx2 *bp)
4065 {
4066 int rc = 0;
4067
4068 if (!netif_running(bp->dev))
4069 return BNX2_LOOPBACK_FAILED;
4070
4071 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4072 spin_lock_bh(&bp->phy_lock);
4073 bnx2_init_phy(bp);
4074 spin_unlock_bh(&bp->phy_lock);
4075 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4076 rc |= BNX2_MAC_LOOPBACK_FAILED;
4077 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4078 rc |= BNX2_PHY_LOOPBACK_FAILED;
4079 return rc;
4080 }
4081
4082 #define NVRAM_SIZE 0x200
4083 #define CRC32_RESIDUAL 0xdebb20e3
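/* A data block that carries its own CRC32 checks to a constant value,
 * the well-known CRC32 residual, when run through ether_crc_le(), so
 * both 0x100-byte regions below can be validated without parsing
 * their internal layout.
 */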
4084
4085 static int
4086 bnx2_test_nvram(struct bnx2 *bp)
4087 {
4088 u32 buf[NVRAM_SIZE / 4];
4089 u8 *data = (u8 *) buf;
4090 int rc = 0;
4091 u32 magic, csum;
4092
4093 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4094 goto test_nvram_done;
4095
4096 magic = be32_to_cpu(buf[0]);
4097 if (magic != 0x669955aa) {
4098 rc = -ENODEV;
4099 goto test_nvram_done;
4100 }
4101
4102 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4103 goto test_nvram_done;
4104
4105 csum = ether_crc_le(0x100, data);
4106 if (csum != CRC32_RESIDUAL) {
4107 rc = -ENODEV;
4108 goto test_nvram_done;
4109 }
4110
4111 csum = ether_crc_le(0x100, data + 0x100);
4112 if (csum != CRC32_RESIDUAL) {
4113 rc = -ENODEV;
4114 }
4115
4116 test_nvram_done:
4117 return rc;
4118 }
4119
4120 static int
4121 bnx2_test_link(struct bnx2 *bp)
4122 {
4123 u32 bmsr;
4124
4125 spin_lock_bh(&bp->phy_lock);
4126 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4127 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4128 spin_unlock_bh(&bp->phy_lock);
4129
4130 if (bmsr & BMSR_LSTATUS) {
4131 return 0;
4132 }
4133 return -ENODEV;
4134 }
4135
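/* Verify that the device can raise an interrupt at all: force a
 * host-coalescing interrupt and wait up to roughly 100 ms for the
 * interrupt handler to advance the status index that is acknowledged
 * through BNX2_PCICFG_INT_ACK_CMD.
 */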
4136 static int
4137 bnx2_test_intr(struct bnx2 *bp)
4138 {
4139 int i;
4140 u16 status_idx;
4141
4142 if (!netif_running(bp->dev))
4143 return -ENODEV;
4144
4145 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4146
4147 /* This register is not touched during run-time. */
4148 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4149 REG_RD(bp, BNX2_HC_COMMAND);
4150
4151 for (i = 0; i < 10; i++) {
4152 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4153 status_idx) {
4154
4155 break;
4156 }
4157
4158 msleep_interruptible(10);
4159 }
4160 if (i < 10)
4161 return 0;
4162
4163 return -ENODEV;
4164 }
4165
4166 static void
4167 bnx2_timer(unsigned long data)
4168 {
4169 struct bnx2 *bp = (struct bnx2 *) data;
4170 u32 msg;
4171
4172 if (!netif_running(bp->dev))
4173 return;
4174
4175 if (atomic_read(&bp->intr_sem) != 0)
4176 goto bnx2_restart_timer;
4177
4178 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4179 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4180
4181 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4182
4183 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4184 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4185
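/* 5706 SerDes parallel-detect workaround: if autoneg is not
 * completing but the partner's signal is present with no autoneg
 * CONFIG words, force 1000 Mbps full duplex; once CONFIG words are
 * seen again, re-enable autoneg.
 */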
4186 spin_lock(&bp->phy_lock);
4187 if (bp->serdes_an_pending) {
4188 bp->serdes_an_pending--;
4189 }
4190 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4191 u32 bmcr;
4192
4193 bp->current_interval = bp->timer_interval;
4194
4195 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4196
4197 if (bmcr & BMCR_ANENABLE) {
4198 u32 phy1, phy2;
4199
4200 bnx2_write_phy(bp, 0x1c, 0x7c00);
4201 bnx2_read_phy(bp, 0x1c, &phy1);
4202
4203 bnx2_write_phy(bp, 0x17, 0x0f01);
4204 bnx2_read_phy(bp, 0x15, &phy2);
4205 bnx2_write_phy(bp, 0x17, 0x0f01);
4206 bnx2_read_phy(bp, 0x15, &phy2);
4207
4208 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4209 !(phy2 & 0x20)) { /* no CONFIG */
4210
4211 bmcr &= ~BMCR_ANENABLE;
4212 bmcr |= BMCR_SPEED1000 |
4213 BMCR_FULLDPLX;
4214 bnx2_write_phy(bp, MII_BMCR, bmcr);
4215 bp->phy_flags |=
4216 PHY_PARALLEL_DETECT_FLAG;
4217 }
4218 }
4219 }
4220 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4221 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4222 u32 phy2;
4223
4224 bnx2_write_phy(bp, 0x17, 0x0f01);
4225 bnx2_read_phy(bp, 0x15, &phy2);
4226 if (phy2 & 0x20) {
4227 u32 bmcr;
4228
4229 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4230 bmcr |= BMCR_ANENABLE;
4231 bnx2_write_phy(bp, MII_BMCR, bmcr);
4232
4233 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4234
4235 }
4236 }
4237 else
4238 bp->current_interval = bp->timer_interval;
4239
4240 spin_unlock(&bp->phy_lock);
4241 }
4242
4243 bnx2_restart_timer:
4244 mod_timer(&bp->timer, jiffies + bp->current_interval);
4245 }
4246
4247 /* Called with rtnl_lock */
4248 static int
4249 bnx2_open(struct net_device *dev)
4250 {
4251 struct bnx2 *bp = netdev_priv(dev);
4252 int rc;
4253
4254 bnx2_set_power_state(bp, PCI_D0);
4255 bnx2_disable_int(bp);
4256
4257 rc = bnx2_alloc_mem(bp);
4258 if (rc)
4259 return rc;
4260
4261 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4262 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4263 !disable_msi) {
4264
4265 if (pci_enable_msi(bp->pdev) == 0) {
4266 bp->flags |= USING_MSI_FLAG;
4267 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4268 dev);
4269 }
4270 else {
4271 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4272 IRQF_SHARED, dev->name, dev);
4273 }
4274 }
4275 else {
4276 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4277 dev->name, dev);
4278 }
4279 if (rc) {
4280 bnx2_free_mem(bp);
4281 return rc;
4282 }
4283
4284 rc = bnx2_init_nic(bp);
4285
4286 if (rc) {
4287 free_irq(bp->pdev->irq, dev);
4288 if (bp->flags & USING_MSI_FLAG) {
4289 pci_disable_msi(bp->pdev);
4290 bp->flags &= ~USING_MSI_FLAG;
4291 }
4292 bnx2_free_skbs(bp);
4293 bnx2_free_mem(bp);
4294 return rc;
4295 }
4296
4297 mod_timer(&bp->timer, jiffies + bp->current_interval);
4298
4299 atomic_set(&bp->intr_sem, 0);
4300
4301 bnx2_enable_int(bp);
4302
4303 if (bp->flags & USING_MSI_FLAG) {
/* Test MSI to make sure it is working.
 * If the MSI test fails, go back to INTx mode.
 */
4307 if (bnx2_test_intr(bp) != 0) {
4308 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4309 " using MSI, switching to INTx mode. Please"
4310 " report this failure to the PCI maintainer"
4311 " and include system chipset information.\n",
4312 bp->dev->name);
4313
4314 bnx2_disable_int(bp);
4315 free_irq(bp->pdev->irq, dev);
4316 pci_disable_msi(bp->pdev);
4317 bp->flags &= ~USING_MSI_FLAG;
4318
4319 rc = bnx2_init_nic(bp);
4320
4321 if (!rc) {
4322 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4323 IRQF_SHARED, dev->name, dev);
4324 }
4325 if (rc) {
4326 bnx2_free_skbs(bp);
4327 bnx2_free_mem(bp);
4328 del_timer_sync(&bp->timer);
4329 return rc;
4330 }
4331 bnx2_enable_int(bp);
4332 }
4333 }
4334 if (bp->flags & USING_MSI_FLAG) {
4335 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4336 }
4337
4338 netif_start_queue(dev);
4339
4340 return 0;
4341 }
4342
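/* Deferred reset handler, scheduled from bnx2_tx_timeout(): quiesce
 * the interface, reinitialize the NIC, and restart it.  intr_sem is
 * bumped so the interrupt path stays quiet until bnx2_netif_start()
 * re-enables it.
 */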
4343 static void
4344 bnx2_reset_task(void *data)
4345 {
4346 struct bnx2 *bp = data;
4347
4348 if (!netif_running(bp->dev))
4349 return;
4350
4351 bp->in_reset_task = 1;
4352 bnx2_netif_stop(bp);
4353
4354 bnx2_init_nic(bp);
4355
4356 atomic_set(&bp->intr_sem, 1);
4357 bnx2_netif_start(bp);
4358 bp->in_reset_task = 0;
4359 }
4360
4361 static void
4362 bnx2_tx_timeout(struct net_device *dev)
4363 {
4364 struct bnx2 *bp = netdev_priv(dev);
4365
/* This allows the netif to be shut down gracefully before the reset */
4367 schedule_work(&bp->reset_task);
4368 }
4369
4370 #ifdef BCM_VLAN
4371 /* Called with rtnl_lock */
4372 static void
4373 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4374 {
4375 struct bnx2 *bp = netdev_priv(dev);
4376
4377 bnx2_netif_stop(bp);
4378
4379 bp->vlgrp = vlgrp;
4380 bnx2_set_rx_mode(dev);
4381
4382 bnx2_netif_start(bp);
4383 }
4384
4385 /* Called with rtnl_lock */
4386 static void
4387 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4388 {
4389 struct bnx2 *bp = netdev_priv(dev);
4390
4391 bnx2_netif_stop(bp);
4392
4393 if (bp->vlgrp)
4394 bp->vlgrp->vlan_devices[vid] = NULL;
4395 bnx2_set_rx_mode(dev);
4396
4397 bnx2_netif_start(bp);
4398 }
4399 #endif
4400
4401 /* Called with netif_tx_lock.
4402 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4403 * netif_wake_queue().
4404 */
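/* Hard-start transmit: build a chain of tx_bd descriptors for the skb
 * (the first bd flagged START, one more per page fragment, the last
 * flagged END), then ring the tx doorbell by writing the new producer
 * index and cumulative byte sequence to the L2 context mailbox.
 */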
4405 static int
4406 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4407 {
4408 struct bnx2 *bp = netdev_priv(dev);
4409 dma_addr_t mapping;
4410 struct tx_bd *txbd;
4411 struct sw_bd *tx_buf;
4412 u32 len, vlan_tag_flags, last_frag, mss;
4413 u16 prod, ring_prod;
4414 int i;
4415
4416 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4417 netif_stop_queue(dev);
4418 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4419 dev->name);
4420
4421 return NETDEV_TX_BUSY;
4422 }
4423 len = skb_headlen(skb);
4424 prod = bp->tx_prod;
4425 ring_prod = TX_RING_IDX(prod);
4426
4427 vlan_tag_flags = 0;
4428 if (skb->ip_summed == CHECKSUM_HW) {
4429 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4430 }
4431
if (bp->vlgrp && vlan_tx_tag_present(skb)) {
4433 vlan_tag_flags |=
4434 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4435 }
4436 #ifdef BCM_TSO
4437 if ((mss = skb_shinfo(skb)->gso_size) &&
4438 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4439 u32 tcp_opt_len, ip_tcp_len;
4440
4441 if (skb_header_cloned(skb) &&
4442 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4443 dev_kfree_skb(skb);
4444 return NETDEV_TX_OK;
4445 }
4446
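/* For LSO the chip wants the IP checksum cleared and the TCP checksum
 * field seeded with the pseudo-header sum (computed with zero length);
 * any IP or TCP header options are encoded into the bd flags as a
 * count of extra 32-bit words.
 */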
vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

tcp_opt_len = 0;
4451 if (skb->h.th->doff > 5) {
4452 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4453 }
4454 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4455
4456 skb->nh.iph->check = 0;
4457 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4458 skb->h.th->check =
4459 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4460 skb->nh.iph->daddr,
4461 0, IPPROTO_TCP, 0);
4462
4463 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4464 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4465 (tcp_opt_len >> 2)) << 8;
4466 }
4467 }
4468 else
4469 #endif
4470 {
4471 mss = 0;
4472 }
4473
4474 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4475
4476 tx_buf = &bp->tx_buf_ring[ring_prod];
4477 tx_buf->skb = skb;
4478 pci_unmap_addr_set(tx_buf, mapping, mapping);
4479
4480 txbd = &bp->tx_desc_ring[ring_prod];
4481
4482 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4483 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4484 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4485 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4486
4487 last_frag = skb_shinfo(skb)->nr_frags;
4488
4489 for (i = 0; i < last_frag; i++) {
4490 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4491
4492 prod = NEXT_TX_BD(prod);
4493 ring_prod = TX_RING_IDX(prod);
4494 txbd = &bp->tx_desc_ring[ring_prod];
4495
4496 len = frag->size;
4497 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4498 len, PCI_DMA_TODEVICE);
4499 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4500 mapping, mapping);
4501
4502 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4503 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4504 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4505 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4506
4507 }
4508 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4509
4510 prod = NEXT_TX_BD(prod);
4511 bp->tx_prod_bseq += skb->len;
4512
4513 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4514 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4515
4516 mmiowb();
4517
4518 bp->tx_prod = prod;
4519 dev->trans_start = jiffies;
4520
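/* Stop the queue before re-checking availability so a concurrent
 * bnx2_tx_int() cannot observe a running queue, skip its wakeup, and
 * leave the queue stopped forever; if it freed enough descriptors in
 * the meantime, wake the queue back up here.
 */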
4521 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4522 netif_stop_queue(dev);
4523 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4524 netif_wake_queue(dev);
4525 }
4526
4527 return NETDEV_TX_OK;
4528 }
4529
4530 /* Called with rtnl_lock */
4531 static int
4532 bnx2_close(struct net_device *dev)
4533 {
4534 struct bnx2 *bp = netdev_priv(dev);
4535 u32 reset_code;
4536
4537 /* Calling flush_scheduled_work() may deadlock because
4538 * linkwatch_event() may be on the workqueue and it will try to get
4539 * the rtnl_lock which we are holding.
4540 */
4541 while (bp->in_reset_task)
4542 msleep(1);
4543
4544 bnx2_netif_stop(bp);
4545 del_timer_sync(&bp->timer);
4546 if (bp->flags & NO_WOL_FLAG)
4547 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4548 else if (bp->wol)
4549 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4550 else
4551 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4552 bnx2_reset_chip(bp, reset_code);
4553 free_irq(bp->pdev->irq, dev);
4554 if (bp->flags & USING_MSI_FLAG) {
4555 pci_disable_msi(bp->pdev);
4556 bp->flags &= ~USING_MSI_FLAG;
4557 }
4558 bnx2_free_skbs(bp);
4559 bnx2_free_mem(bp);
4560 bp->link_up = 0;
4561 netif_carrier_off(bp->dev);
4562 bnx2_set_power_state(bp, PCI_D3hot);
4563 return 0;
4564 }
4565
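/* The chip keeps 64-bit counters as _hi/_lo register pairs.  On 64-bit
 * hosts both halves are combined, e.g. GET_NET_STATS(stat_IfHCInOctets)
 * becomes ((unsigned long) stat_IfHCInOctets_hi << 32) +
 * stat_IfHCInOctets_lo; on 32-bit hosts only the low word fits in the
 * unsigned long fields of struct net_device_stats.
 */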
#define GET_NET_STATS64(ctr) \
(((unsigned long) (ctr##_hi) << 32) + \
(unsigned long) (ctr##_lo))
4569
4570 #define GET_NET_STATS32(ctr) \
4571 (ctr##_lo)
4572
4573 #if (BITS_PER_LONG == 64)
4574 #define GET_NET_STATS GET_NET_STATS64
4575 #else
4576 #define GET_NET_STATS GET_NET_STATS32
4577 #endif
4578
4579 static struct net_device_stats *
4580 bnx2_get_stats(struct net_device *dev)
4581 {
4582 struct bnx2 *bp = netdev_priv(dev);
4583 struct statistics_block *stats_blk = bp->stats_blk;
4584 struct net_device_stats *net_stats = &bp->net_stats;
4585
4586 if (bp->stats_blk == NULL) {
4587 return net_stats;
4588 }
4589 net_stats->rx_packets =
4590 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4591 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4592 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4593
4594 net_stats->tx_packets =
4595 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4596 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4597 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4598
4599 net_stats->rx_bytes =
4600 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4601
4602 net_stats->tx_bytes =
4603 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4604
4605 net_stats->multicast =
4606 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4607
4608 net_stats->collisions =
4609 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4610
4611 net_stats->rx_length_errors =
4612 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4613 stats_blk->stat_EtherStatsOverrsizePkts);
4614
4615 net_stats->rx_over_errors =
4616 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4617
4618 net_stats->rx_frame_errors =
4619 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4620
4621 net_stats->rx_crc_errors =
4622 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4623
4624 net_stats->rx_errors = net_stats->rx_length_errors +
4625 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4626 net_stats->rx_crc_errors;
4627
4628 net_stats->tx_aborted_errors =
4629 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4630 stats_blk->stat_Dot3StatsLateCollisions);
4631
4632 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4633 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4634 net_stats->tx_carrier_errors = 0;
4635 else {
4636 net_stats->tx_carrier_errors =
4637 (unsigned long)
4638 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4639 }
4640
4641 net_stats->tx_errors =
4642 (unsigned long)
4643 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4644 +
4645 net_stats->tx_aborted_errors +
4646 net_stats->tx_carrier_errors;
4647
4648 net_stats->rx_missed_errors =
4649 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4650 stats_blk->stat_FwRxDrop);
4651
4652 return net_stats;
4653 }
4654
4655 /* All ethtool functions called with rtnl_lock */
4656
4657 static int
4658 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4659 {
4660 struct bnx2 *bp = netdev_priv(dev);
4661
4662 cmd->supported = SUPPORTED_Autoneg;
4663 if (bp->phy_flags & PHY_SERDES_FLAG) {
4664 cmd->supported |= SUPPORTED_1000baseT_Full |
4665 SUPPORTED_FIBRE;
4666
4667 cmd->port = PORT_FIBRE;
4668 }
4669 else {
4670 cmd->supported |= SUPPORTED_10baseT_Half |
4671 SUPPORTED_10baseT_Full |
4672 SUPPORTED_100baseT_Half |
4673 SUPPORTED_100baseT_Full |
4674 SUPPORTED_1000baseT_Full |
4675 SUPPORTED_TP;
4676
4677 cmd->port = PORT_TP;
4678 }
4679
4680 cmd->advertising = bp->advertising;
4681
4682 if (bp->autoneg & AUTONEG_SPEED) {
4683 cmd->autoneg = AUTONEG_ENABLE;
4684 }
4685 else {
4686 cmd->autoneg = AUTONEG_DISABLE;
4687 }
4688
4689 if (netif_carrier_ok(dev)) {
4690 cmd->speed = bp->line_speed;
4691 cmd->duplex = bp->duplex;
4692 }
4693 else {
4694 cmd->speed = -1;
4695 cmd->duplex = -1;
4696 }
4697
4698 cmd->transceiver = XCVR_INTERNAL;
4699 cmd->phy_address = bp->phy_addr;
4700
4701 return 0;
4702 }
4703
4704 static int
4705 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4706 {
4707 struct bnx2 *bp = netdev_priv(dev);
4708 u8 autoneg = bp->autoneg;
4709 u8 req_duplex = bp->req_duplex;
4710 u16 req_line_speed = bp->req_line_speed;
4711 u32 advertising = bp->advertising;
4712
4713 if (cmd->autoneg == AUTONEG_ENABLE) {
4714 autoneg |= AUTONEG_SPEED;
4715
4716 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4717
4718 /* allow advertising 1 speed */
4719 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4720 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4721 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4722 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4723
4724 if (bp->phy_flags & PHY_SERDES_FLAG)
4725 return -EINVAL;
4726
4727 advertising = cmd->advertising;
4728
4729 }
4730 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4731 advertising = cmd->advertising;
4732 }
4733 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4734 return -EINVAL;
4735 }
4736 else {
4737 if (bp->phy_flags & PHY_SERDES_FLAG) {
4738 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4739 }
4740 else {
4741 advertising = ETHTOOL_ALL_COPPER_SPEED;
4742 }
4743 }
4744 advertising |= ADVERTISED_Autoneg;
4745 }
4746 else {
4747 if (bp->phy_flags & PHY_SERDES_FLAG) {
4748 if ((cmd->speed != SPEED_1000) ||
4749 (cmd->duplex != DUPLEX_FULL)) {
4750 return -EINVAL;
4751 }
4752 }
4753 else if (cmd->speed == SPEED_1000) {
4754 return -EINVAL;
4755 }
4756 autoneg &= ~AUTONEG_SPEED;
4757 req_line_speed = cmd->speed;
4758 req_duplex = cmd->duplex;
4759 advertising = 0;
4760 }
4761
4762 bp->autoneg = autoneg;
4763 bp->advertising = advertising;
4764 bp->req_line_speed = req_line_speed;
4765 bp->req_duplex = req_duplex;
4766
4767 spin_lock_bh(&bp->phy_lock);
4768
4769 bnx2_setup_phy(bp);
4770
4771 spin_unlock_bh(&bp->phy_lock);
4772
4773 return 0;
4774 }
4775
4776 static void
4777 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4778 {
4779 struct bnx2 *bp = netdev_priv(dev);
4780
4781 strcpy(info->driver, DRV_MODULE_NAME);
4782 strcpy(info->version, DRV_MODULE_VERSION);
4783 strcpy(info->bus_info, pci_name(bp->pdev));
4784 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4785 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4786 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4787 info->fw_version[1] = info->fw_version[3] = '.';
4788 info->fw_version[5] = 0;
4789 }
4790
4791 #define BNX2_REGDUMP_LEN (32 * 1024)
4792
4793 static int
4794 bnx2_get_regs_len(struct net_device *dev)
4795 {
4796 return BNX2_REGDUMP_LEN;
4797 }
4798
4799 static void
4800 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4801 {
4802 u32 *p = _p, i, offset;
4803 u8 *orig_p = _p;
4804 struct bnx2 *bp = netdev_priv(dev);
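/* reg_boundaries holds consecutive [start, end) pairs of readable
 * register ranges; the loop below dumps each range and leaves the
 * unreadable gaps between pairs zero-filled.
 */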
4805 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4806 0x0800, 0x0880, 0x0c00, 0x0c10,
4807 0x0c30, 0x0d08, 0x1000, 0x101c,
4808 0x1040, 0x1048, 0x1080, 0x10a4,
4809 0x1400, 0x1490, 0x1498, 0x14f0,
4810 0x1500, 0x155c, 0x1580, 0x15dc,
4811 0x1600, 0x1658, 0x1680, 0x16d8,
4812 0x1800, 0x1820, 0x1840, 0x1854,
4813 0x1880, 0x1894, 0x1900, 0x1984,
4814 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4815 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4816 0x2000, 0x2030, 0x23c0, 0x2400,
4817 0x2800, 0x2820, 0x2830, 0x2850,
4818 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4819 0x3c00, 0x3c94, 0x4000, 0x4010,
4820 0x4080, 0x4090, 0x43c0, 0x4458,
4821 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4822 0x4fc0, 0x5010, 0x53c0, 0x5444,
4823 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4824 0x5fc0, 0x6000, 0x6400, 0x6428,
4825 0x6800, 0x6848, 0x684c, 0x6860,
4826 0x6888, 0x6910, 0x8000 };
4827
4828 regs->version = 0;
4829
4830 memset(p, 0, BNX2_REGDUMP_LEN);
4831
4832 if (!netif_running(bp->dev))
4833 return;
4834
4835 i = 0;
4836 offset = reg_boundaries[0];
4837 p += offset;
4838 while (offset < BNX2_REGDUMP_LEN) {
4839 *p++ = REG_RD(bp, offset);
4840 offset += 4;
4841 if (offset == reg_boundaries[i + 1]) {
4842 offset = reg_boundaries[i + 2];
4843 p = (u32 *) (orig_p + offset);
4844 i += 2;
4845 }
4846 }
4847 }
4848
4849 static void
4850 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4851 {
4852 struct bnx2 *bp = netdev_priv(dev);
4853
4854 if (bp->flags & NO_WOL_FLAG) {
4855 wol->supported = 0;
4856 wol->wolopts = 0;
4857 }
4858 else {
4859 wol->supported = WAKE_MAGIC;
4860 if (bp->wol)
4861 wol->wolopts = WAKE_MAGIC;
4862 else
4863 wol->wolopts = 0;
4864 }
4865 memset(&wol->sopass, 0, sizeof(wol->sopass));
4866 }
4867
4868 static int
4869 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4870 {
4871 struct bnx2 *bp = netdev_priv(dev);
4872
4873 if (wol->wolopts & ~WAKE_MAGIC)
4874 return -EINVAL;
4875
4876 if (wol->wolopts & WAKE_MAGIC) {
4877 if (bp->flags & NO_WOL_FLAG)
4878 return -EINVAL;
4879
4880 bp->wol = 1;
4881 }
4882 else {
4883 bp->wol = 0;
4884 }
4885 return 0;
4886 }
4887
4888 static int
4889 bnx2_nway_reset(struct net_device *dev)
4890 {
4891 struct bnx2 *bp = netdev_priv(dev);
4892 u32 bmcr;
4893
4894 if (!(bp->autoneg & AUTONEG_SPEED)) {
4895 return -EINVAL;
4896 }
4897
4898 spin_lock_bh(&bp->phy_lock);
4899
/* Force a link-down event that is visible to the link partner */
4901 if (bp->phy_flags & PHY_SERDES_FLAG) {
4902 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4903 spin_unlock_bh(&bp->phy_lock);
4904
4905 msleep(20);
4906
4907 spin_lock_bh(&bp->phy_lock);
4908 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4909 bp->current_interval = SERDES_AN_TIMEOUT;
4910 bp->serdes_an_pending = 1;
4911 mod_timer(&bp->timer, jiffies + bp->current_interval);
4912 }
4913 }
4914
4915 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4916 bmcr &= ~BMCR_LOOPBACK;
4917 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4918
4919 spin_unlock_bh(&bp->phy_lock);
4920
4921 return 0;
4922 }
4923
4924 static int
4925 bnx2_get_eeprom_len(struct net_device *dev)
4926 {
4927 struct bnx2 *bp = netdev_priv(dev);
4928
4929 if (bp->flash_info == NULL)
4930 return 0;
4931
4932 return (int) bp->flash_size;
4933 }
4934
4935 static int
4936 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4937 u8 *eebuf)
4938 {
4939 struct bnx2 *bp = netdev_priv(dev);
4940 int rc;
4941
4942 /* parameters already validated in ethtool_get_eeprom */
4943
4944 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4945
4946 return rc;
4947 }
4948
4949 static int
4950 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4951 u8 *eebuf)
4952 {
4953 struct bnx2 *bp = netdev_priv(dev);
4954 int rc;
4955
4956 /* parameters already validated in ethtool_set_eeprom */
4957
4958 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4959
4960 return rc;
4961 }
4962
4963 static int
4964 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4965 {
4966 struct bnx2 *bp = netdev_priv(dev);
4967
4968 memset(coal, 0, sizeof(struct ethtool_coalesce));
4969
4970 coal->rx_coalesce_usecs = bp->rx_ticks;
4971 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4972 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4973 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4974
4975 coal->tx_coalesce_usecs = bp->tx_ticks;
4976 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4977 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4978 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4979
4980 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4981
4982 return 0;
4983 }
4984
4985 static int
4986 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4987 {
4988 struct bnx2 *bp = netdev_priv(dev);
4989
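/* Clamp each value to what the host coalescing block can store: the
 * tick fields appear to be 10-bit quantities (max 0x3ff) and the
 * frame-count trip points 8-bit (max 0xff), while stats_ticks keeps
 * only whole multiples of 256 usecs.  These limits are inferred from
 * the masks used below, not from documentation.
 */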
4990 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4991 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4992
4993 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4994 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4995
4996 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4997 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4998
4999 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5000 if (bp->rx_quick_cons_trip_int > 0xff)
5001 bp->rx_quick_cons_trip_int = 0xff;
5002
5003 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5004 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5005
5006 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5007 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5008
5009 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5010 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5011
bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
if (bp->tx_quick_cons_trip_int > 0xff)
bp->tx_quick_cons_trip_int = 0xff;
5015
5016 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5017 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5018 bp->stats_ticks &= 0xffff00;
5019
5020 if (netif_running(bp->dev)) {
5021 bnx2_netif_stop(bp);
5022 bnx2_init_nic(bp);
5023 bnx2_netif_start(bp);
5024 }
5025
5026 return 0;
5027 }
5028
5029 static void
5030 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5031 {
5032 struct bnx2 *bp = netdev_priv(dev);
5033
5034 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5035 ering->rx_mini_max_pending = 0;
5036 ering->rx_jumbo_max_pending = 0;
5037
5038 ering->rx_pending = bp->rx_ring_size;
5039 ering->rx_mini_pending = 0;
5040 ering->rx_jumbo_pending = 0;
5041
5042 ering->tx_max_pending = MAX_TX_DESC_CNT;
5043 ering->tx_pending = bp->tx_ring_size;
5044 }
5045
5046 static int
5047 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5048 {
5049 struct bnx2 *bp = netdev_priv(dev);
5050
5051 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5052 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5053 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5054
5055 return -EINVAL;
5056 }
5057 if (netif_running(bp->dev)) {
5058 bnx2_netif_stop(bp);
5059 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5060 bnx2_free_skbs(bp);
5061 bnx2_free_mem(bp);
5062 }
5063
5064 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5065 bp->tx_ring_size = ering->tx_pending;
5066
5067 if (netif_running(bp->dev)) {
5068 int rc;
5069
5070 rc = bnx2_alloc_mem(bp);
5071 if (rc)
5072 return rc;
5073 bnx2_init_nic(bp);
5074 bnx2_netif_start(bp);
5075 }
5076
5077 return 0;
5078 }
5079
5080 static void
5081 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5082 {
5083 struct bnx2 *bp = netdev_priv(dev);
5084
5085 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5086 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5087 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5088 }
5089
5090 static int
5091 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5092 {
5093 struct bnx2 *bp = netdev_priv(dev);
5094
5095 bp->req_flow_ctrl = 0;
5096 if (epause->rx_pause)
5097 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5098 if (epause->tx_pause)
5099 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5100
5101 if (epause->autoneg) {
5102 bp->autoneg |= AUTONEG_FLOW_CTRL;
5103 }
5104 else {
5105 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5106 }
5107
5108 spin_lock_bh(&bp->phy_lock);
5109
5110 bnx2_setup_phy(bp);
5111
5112 spin_unlock_bh(&bp->phy_lock);
5113
5114 return 0;
5115 }
5116
5117 static u32
5118 bnx2_get_rx_csum(struct net_device *dev)
5119 {
5120 struct bnx2 *bp = netdev_priv(dev);
5121
5122 return bp->rx_csum;
5123 }
5124
5125 static int
5126 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5127 {
5128 struct bnx2 *bp = netdev_priv(dev);
5129
5130 bp->rx_csum = data;
5131 return 0;
5132 }
5133
5134 static int
5135 bnx2_set_tso(struct net_device *dev, u32 data)
5136 {
5137 if (data)
5138 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5139 else
5140 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5141 return 0;
5142 }
5143
5144 #define BNX2_NUM_STATS 46
5145
5146 static struct {
5147 char string[ETH_GSTRING_LEN];
5148 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5149 { "rx_bytes" },
5150 { "rx_error_bytes" },
5151 { "tx_bytes" },
5152 { "tx_error_bytes" },
5153 { "rx_ucast_packets" },
5154 { "rx_mcast_packets" },
5155 { "rx_bcast_packets" },
5156 { "tx_ucast_packets" },
5157 { "tx_mcast_packets" },
5158 { "tx_bcast_packets" },
5159 { "tx_mac_errors" },
5160 { "tx_carrier_errors" },
5161 { "rx_crc_errors" },
5162 { "rx_align_errors" },
5163 { "tx_single_collisions" },
5164 { "tx_multi_collisions" },
5165 { "tx_deferred" },
5166 { "tx_excess_collisions" },
5167 { "tx_late_collisions" },
5168 { "tx_total_collisions" },
5169 { "rx_fragments" },
5170 { "rx_jabbers" },
5171 { "rx_undersize_packets" },
5172 { "rx_oversize_packets" },
5173 { "rx_64_byte_packets" },
5174 { "rx_65_to_127_byte_packets" },
5175 { "rx_128_to_255_byte_packets" },
5176 { "rx_256_to_511_byte_packets" },
5177 { "rx_512_to_1023_byte_packets" },
5178 { "rx_1024_to_1522_byte_packets" },
5179 { "rx_1523_to_9022_byte_packets" },
5180 { "tx_64_byte_packets" },
5181 { "tx_65_to_127_byte_packets" },
5182 { "tx_128_to_255_byte_packets" },
5183 { "tx_256_to_511_byte_packets" },
5184 { "tx_512_to_1023_byte_packets" },
5185 { "tx_1024_to_1522_byte_packets" },
5186 { "tx_1523_to_9022_byte_packets" },
5187 { "rx_xon_frames" },
5188 { "rx_xoff_frames" },
5189 { "tx_xon_frames" },
5190 { "tx_xoff_frames" },
5191 { "rx_mac_ctrl_frames" },
5192 { "rx_filtered_packets" },
5193 { "rx_discards" },
5194 { "rx_fw_discards" },
5195 };
5196
5197 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5198
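/* Word offsets of each counter within the hardware statistics block,
 * one entry per string in bnx2_stats_str_arr above.  64-bit counters
 * reference their _hi word; the per-chip length arrays that follow
 * select 4- or 8-byte reads (0 = counter skipped due to errata).
 */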
5199 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5200 STATS_OFFSET32(stat_IfHCInOctets_hi),
5201 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5202 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5203 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5204 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5205 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5206 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5207 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5208 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5209 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5210 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5211 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5212 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5213 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5214 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5215 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5216 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5217 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5218 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5219 STATS_OFFSET32(stat_EtherStatsCollisions),
5220 STATS_OFFSET32(stat_EtherStatsFragments),
5221 STATS_OFFSET32(stat_EtherStatsJabbers),
5222 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5223 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5224 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5225 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5226 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5227 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5228 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5229 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5230 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5231 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5232 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5233 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5234 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5235 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5236 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5237 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5238 STATS_OFFSET32(stat_XonPauseFramesReceived),
5239 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5240 STATS_OFFSET32(stat_OutXonSent),
5241 STATS_OFFSET32(stat_OutXoffSent),
5242 STATS_OFFSET32(stat_MacControlFramesReceived),
5243 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5244 STATS_OFFSET32(stat_IfInMBUFDiscards),
5245 STATS_OFFSET32(stat_FwRxDrop),
5246 };
5247
5248 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5249 * skipped because of errata.
5250 */
5251 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5252 8,0,8,8,8,8,8,8,8,8,
5253 4,0,4,4,4,4,4,4,4,4,
5254 4,4,4,4,4,4,4,4,4,4,
5255 4,4,4,4,4,4,4,4,4,4,
5256 4,4,4,4,4,4,
5257 };
5258
5259 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5260 8,0,8,8,8,8,8,8,8,8,
5261 4,4,4,4,4,4,4,4,4,4,
5262 4,4,4,4,4,4,4,4,4,4,
5263 4,4,4,4,4,4,4,4,4,4,
5264 4,4,4,4,4,4,
5265 };
5266
5267 #define BNX2_NUM_TESTS 6
5268
5269 static struct {
5270 char string[ETH_GSTRING_LEN];
5271 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5272 { "register_test (offline)" },
5273 { "memory_test (offline)" },
5274 { "loopback_test (offline)" },
5275 { "nvram_test (online)" },
5276 { "interrupt_test (online)" },
5277 { "link_test (online)" },
5278 };
5279
5280 static int
5281 bnx2_self_test_count(struct net_device *dev)
5282 {
5283 return BNX2_NUM_TESTS;
5284 }
5285
5286 static void
5287 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5288 {
5289 struct bnx2 *bp = netdev_priv(dev);
5290
5291 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5292 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5293 bnx2_netif_stop(bp);
5294 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5295 bnx2_free_skbs(bp);
5296
5297 if (bnx2_test_registers(bp) != 0) {
5298 buf[0] = 1;
5299 etest->flags |= ETH_TEST_FL_FAILED;
5300 }
5301 if (bnx2_test_memory(bp) != 0) {
5302 buf[1] = 1;
5303 etest->flags |= ETH_TEST_FL_FAILED;
5304 }
5305 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5306 etest->flags |= ETH_TEST_FL_FAILED;
5307
5308 if (!netif_running(bp->dev)) {
5309 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5310 }
5311 else {
5312 bnx2_init_nic(bp);
5313 bnx2_netif_start(bp);
5314 }
5315
5316 /* wait for link up */
5317 msleep_interruptible(3000);
5318 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5319 msleep_interruptible(4000);
5320 }
5321
5322 if (bnx2_test_nvram(bp) != 0) {
5323 buf[3] = 1;
5324 etest->flags |= ETH_TEST_FL_FAILED;
5325 }
5326 if (bnx2_test_intr(bp) != 0) {
5327 buf[4] = 1;
5328 etest->flags |= ETH_TEST_FL_FAILED;
5329 }
5330
5331 if (bnx2_test_link(bp) != 0) {
5332 buf[5] = 1;
5333 etest->flags |= ETH_TEST_FL_FAILED;
5334
5335 }
5336 }
5337
5338 static void
5339 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5340 {
5341 switch (stringset) {
5342 case ETH_SS_STATS:
5343 memcpy(buf, bnx2_stats_str_arr,
5344 sizeof(bnx2_stats_str_arr));
5345 break;
5346 case ETH_SS_TEST:
5347 memcpy(buf, bnx2_tests_str_arr,
5348 sizeof(bnx2_tests_str_arr));
5349 break;
5350 }
5351 }
5352
5353 static int
5354 bnx2_get_stats_count(struct net_device *dev)
5355 {
5356 return BNX2_NUM_STATS;
5357 }
5358
5359 static void
5360 bnx2_get_ethtool_stats(struct net_device *dev,
5361 struct ethtool_stats *stats, u64 *buf)
5362 {
5363 struct bnx2 *bp = netdev_priv(dev);
5364 int i;
5365 u32 *hw_stats = (u32 *) bp->stats_blk;
5366 u8 *stats_len_arr = NULL;
5367
5368 if (hw_stats == NULL) {
5369 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5370 return;
5371 }
5372
5373 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5374 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5375 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5376 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5377 stats_len_arr = bnx2_5706_stats_len_arr;
5378 else
5379 stats_len_arr = bnx2_5708_stats_len_arr;
5380
5381 for (i = 0; i < BNX2_NUM_STATS; i++) {
5382 if (stats_len_arr[i] == 0) {
5383 /* skip this counter */
5384 buf[i] = 0;
5385 continue;
5386 }
5387 if (stats_len_arr[i] == 4) {
5388 /* 4-byte counter */
5389 buf[i] = (u64)
5390 *(hw_stats + bnx2_stats_offset_arr[i]);
5391 continue;
5392 }
5393 /* 8-byte counter */
5394 buf[i] = (((u64) *(hw_stats +
5395 bnx2_stats_offset_arr[i])) << 32) +
5396 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5397 }
5398 }
5399
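/* ethtool LED identify (ethtool -p): override the MAC LED controls and
 * blink all LEDs at roughly 1 Hz for "data" seconds (2 if unspecified),
 * then restore the saved LED configuration.
 */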
5400 static int
5401 bnx2_phys_id(struct net_device *dev, u32 data)
5402 {
5403 struct bnx2 *bp = netdev_priv(dev);
5404 int i;
5405 u32 save;
5406
5407 if (data == 0)
5408 data = 2;
5409
5410 save = REG_RD(bp, BNX2_MISC_CFG);
5411 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5412
5413 for (i = 0; i < (data * 2); i++) {
5414 if ((i % 2) == 0) {
5415 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5416 }
5417 else {
5418 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5419 BNX2_EMAC_LED_1000MB_OVERRIDE |
5420 BNX2_EMAC_LED_100MB_OVERRIDE |
5421 BNX2_EMAC_LED_10MB_OVERRIDE |
5422 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5423 BNX2_EMAC_LED_TRAFFIC);
5424 }
5425 msleep_interruptible(500);
5426 if (signal_pending(current))
5427 break;
5428 }
5429 REG_WR(bp, BNX2_EMAC_LED, 0);
5430 REG_WR(bp, BNX2_MISC_CFG, save);
5431 return 0;
5432 }
5433
5434 static struct ethtool_ops bnx2_ethtool_ops = {
5435 .get_settings = bnx2_get_settings,
5436 .set_settings = bnx2_set_settings,
5437 .get_drvinfo = bnx2_get_drvinfo,
5438 .get_regs_len = bnx2_get_regs_len,
5439 .get_regs = bnx2_get_regs,
5440 .get_wol = bnx2_get_wol,
5441 .set_wol = bnx2_set_wol,
5442 .nway_reset = bnx2_nway_reset,
5443 .get_link = ethtool_op_get_link,
5444 .get_eeprom_len = bnx2_get_eeprom_len,
5445 .get_eeprom = bnx2_get_eeprom,
5446 .set_eeprom = bnx2_set_eeprom,
5447 .get_coalesce = bnx2_get_coalesce,
5448 .set_coalesce = bnx2_set_coalesce,
5449 .get_ringparam = bnx2_get_ringparam,
5450 .set_ringparam = bnx2_set_ringparam,
5451 .get_pauseparam = bnx2_get_pauseparam,
5452 .set_pauseparam = bnx2_set_pauseparam,
5453 .get_rx_csum = bnx2_get_rx_csum,
5454 .set_rx_csum = bnx2_set_rx_csum,
5455 .get_tx_csum = ethtool_op_get_tx_csum,
5456 .set_tx_csum = ethtool_op_set_tx_csum,
5457 .get_sg = ethtool_op_get_sg,
5458 .set_sg = ethtool_op_set_sg,
5459 #ifdef BCM_TSO
5460 .get_tso = ethtool_op_get_tso,
5461 .set_tso = bnx2_set_tso,
5462 #endif
5463 .self_test_count = bnx2_self_test_count,
5464 .self_test = bnx2_self_test,
5465 .get_strings = bnx2_get_strings,
5466 .phys_id = bnx2_phys_id,
5467 .get_stats_count = bnx2_get_stats_count,
5468 .get_ethtool_stats = bnx2_get_ethtool_stats,
5469 .get_perm_addr = ethtool_op_get_perm_addr,
5470 };
5471
5472 /* Called with rtnl_lock */
5473 static int
5474 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5475 {
5476 struct mii_ioctl_data *data = if_mii(ifr);
5477 struct bnx2 *bp = netdev_priv(dev);
5478 int err;
5479
5480 switch(cmd) {
5481 case SIOCGMIIPHY:
5482 data->phy_id = bp->phy_addr;
5483
5484 /* fallthru */
5485 case SIOCGMIIREG: {
5486 u32 mii_regval;
5487
5488 spin_lock_bh(&bp->phy_lock);
5489 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5490 spin_unlock_bh(&bp->phy_lock);
5491
5492 data->val_out = mii_regval;
5493
5494 return err;
5495 }
5496
5497 case SIOCSMIIREG:
5498 if (!capable(CAP_NET_ADMIN))
5499 return -EPERM;
5500
5501 spin_lock_bh(&bp->phy_lock);
5502 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5503 spin_unlock_bh(&bp->phy_lock);
5504
5505 return err;
5506
5507 default:
5508 /* do nothing */
5509 break;
5510 }
5511 return -EOPNOTSUPP;
5512 }
5513
5514 /* Called with rtnl_lock */
5515 static int
5516 bnx2_change_mac_addr(struct net_device *dev, void *p)
5517 {
5518 struct sockaddr *addr = p;
5519 struct bnx2 *bp = netdev_priv(dev);
5520
5521 if (!is_valid_ether_addr(addr->sa_data))
5522 return -EINVAL;
5523
5524 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5525 if (netif_running(dev))
5526 bnx2_set_mac_addr(bp);
5527
5528 return 0;
5529 }
5530
5531 /* Called with rtnl_lock */
5532 static int
5533 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5534 {
5535 struct bnx2 *bp = netdev_priv(dev);
5536
5537 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5538 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5539 return -EINVAL;
5540
5541 dev->mtu = new_mtu;
5542 if (netif_running(dev)) {
5543 bnx2_netif_stop(bp);
5544
5545 bnx2_init_nic(bp);
5546
5547 bnx2_netif_start(bp);
5548 }
5549 return 0;
5550 }
5551
5552 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5553 static void
5554 poll_bnx2(struct net_device *dev)
5555 {
5556 struct bnx2 *bp = netdev_priv(dev);
5557
5558 disable_irq(bp->pdev->irq);
5559 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5560 enable_irq(bp->pdev->irq);
5561 }
5562 #endif
5563
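/* Probe-time board setup: enable and map the PCI device, choose the
 * DMA mask, read the bus width and speed, locate the firmware shared
 * memory, fetch the permanent MAC address, and seed the default ring,
 * coalescing and link settings.  On failure, unwinds through the
 * err_out_* labels below.
 */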
5564 static int __devinit
5565 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5566 {
5567 struct bnx2 *bp;
5568 unsigned long mem_len;
5569 int rc;
5570 u32 reg;
5571
5572 SET_MODULE_OWNER(dev);
5573 SET_NETDEV_DEV(dev, &pdev->dev);
5574 bp = netdev_priv(dev);
5575
5576 bp->flags = 0;
5577 bp->phy_flags = 0;
5578
5579 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5580 rc = pci_enable_device(pdev);
5581 if (rc) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
5583 goto err_out;
5584 }
5585
5586 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5587 dev_err(&pdev->dev,
5588 "Cannot find PCI device base address, aborting.\n");
5589 rc = -ENODEV;
5590 goto err_out_disable;
5591 }
5592
5593 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5594 if (rc) {
5595 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5596 goto err_out_disable;
5597 }
5598
5599 pci_set_master(pdev);
5600
5601 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5602 if (bp->pm_cap == 0) {
5603 dev_err(&pdev->dev,
5604 "Cannot find power management capability, aborting.\n");
5605 rc = -EIO;
5606 goto err_out_release;
5607 }
5608
5609 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5610 if (bp->pcix_cap == 0) {
5611 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
5612 rc = -EIO;
5613 goto err_out_release;
5614 }
5615
5616 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5617 bp->flags |= USING_DAC_FLAG;
5618 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5619 dev_err(&pdev->dev,
5620 "pci_set_consistent_dma_mask failed, aborting.\n");
5621 rc = -EIO;
5622 goto err_out_release;
5623 }
5624 }
5625 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5626 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5627 rc = -EIO;
5628 goto err_out_release;
5629 }
5630
5631 bp->dev = dev;
5632 bp->pdev = pdev;
5633
5634 spin_lock_init(&bp->phy_lock);
5635 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5636
5637 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5638 mem_len = MB_GET_CID_ADDR(17);
5639 dev->mem_end = dev->mem_start + mem_len;
5640 dev->irq = pdev->irq;
5641
5642 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5643
5644 if (!bp->regview) {
5645 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5646 rc = -ENOMEM;
5647 goto err_out_release;
5648 }
5649
/* Configure byte swap and enable write to the reg_window registers.
 * Rely on the CPU to do target byte swapping on big-endian systems;
 * the chip's target access swapping will not swap all accesses.
 */
5654 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5655 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5656 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5657
5658 bnx2_set_power_state(bp, PCI_D0);
5659
5660 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5661
5662 /* Get bus information. */
5663 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5664 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5665 u32 clkreg;
5666
5667 bp->flags |= PCIX_FLAG;
5668
5669 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5670
5671 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5672 switch (clkreg) {
5673 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5674 bp->bus_speed_mhz = 133;
5675 break;
5676
5677 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5678 bp->bus_speed_mhz = 100;
5679 break;
5680
5681 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5682 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5683 bp->bus_speed_mhz = 66;
5684 break;
5685
5686 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5687 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5688 bp->bus_speed_mhz = 50;
5689 break;
5690
5691 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5692 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5693 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5694 bp->bus_speed_mhz = 33;
5695 break;
5696 }
5697 }
5698 else {
5699 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5700 bp->bus_speed_mhz = 66;
5701 else
5702 bp->bus_speed_mhz = 33;
5703 }
5704
5705 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5706 bp->flags |= PCI_32BIT_FLAG;
5707
5708 /* 5706A0 may falsely detect SERR and PERR. */
5709 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5710 reg = REG_RD(bp, PCI_COMMAND);
5711 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5712 REG_WR(bp, PCI_COMMAND, reg);
5713 }
5714 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5715 !(bp->flags & PCIX_FLAG)) {
5716
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting.\n");
rc = -EPERM;
goto err_out_unmap;
5720 }
5721
5722 bnx2_init_nvram(bp);
5723
5724 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5725
5726 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5727 BNX2_SHM_HDR_SIGNATURE_SIG)
5728 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5729 else
5730 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5731
5732 /* Get the permanent MAC address. First we need to make sure the
5733 * firmware is actually running.
5734 */
5735 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5736
5737 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5738 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5739 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5740 rc = -ENODEV;
5741 goto err_out_unmap;
5742 }
5743
5744 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5745
5746 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5747 bp->mac_addr[0] = (u8) (reg >> 8);
5748 bp->mac_addr[1] = (u8) reg;
5749
5750 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5751 bp->mac_addr[2] = (u8) (reg >> 24);
5752 bp->mac_addr[3] = (u8) (reg >> 16);
5753 bp->mac_addr[4] = (u8) (reg >> 8);
5754 bp->mac_addr[5] = (u8) reg;
5755
5756 bp->tx_ring_size = MAX_TX_DESC_CNT;
5757 bnx2_set_rx_ring_size(bp, 100);
5758
5759 bp->rx_csum = 1;
5760
5761 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5762
5763 bp->tx_quick_cons_trip_int = 20;
5764 bp->tx_quick_cons_trip = 20;
5765 bp->tx_ticks_int = 80;
5766 bp->tx_ticks = 80;
5767
5768 bp->rx_quick_cons_trip_int = 6;
5769 bp->rx_quick_cons_trip = 6;
5770 bp->rx_ticks_int = 18;
5771 bp->rx_ticks = 18;
5772
5773 bp->stats_ticks = 1000000 & 0xffff00;
5774
5775 bp->timer_interval = HZ;
5776 bp->current_interval = HZ;
5777
5778 bp->phy_addr = 1;
5779
5780 /* Disable WOL support if we are running on a SERDES chip. */
5781 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5782 bp->phy_flags |= PHY_SERDES_FLAG;
5783 bp->flags |= NO_WOL_FLAG;
5784 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5785 bp->phy_addr = 2;
5786 reg = REG_RD_IND(bp, bp->shmem_base +
5787 BNX2_SHARED_HW_CFG_CONFIG);
5788 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5789 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5790 }
5791 }
5792
5793 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5794 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5795 (CHIP_ID(bp) == CHIP_ID_5708_B1))
5796 bp->flags |= NO_WOL_FLAG;
5797
5798 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5799 bp->tx_quick_cons_trip_int =
5800 bp->tx_quick_cons_trip;
5801 bp->tx_ticks_int = bp->tx_ticks;
5802 bp->rx_quick_cons_trip_int =
5803 bp->rx_quick_cons_trip;
5804 bp->rx_ticks_int = bp->rx_ticks;
5805 bp->comp_prod_trip_int = bp->comp_prod_trip;
5806 bp->com_ticks_int = bp->com_ticks;
5807 bp->cmd_ticks_int = bp->cmd_ticks;
5808 }
5809
5810 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5811 bp->req_line_speed = 0;
5812 if (bp->phy_flags & PHY_SERDES_FLAG) {
5813 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5814
5815 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5816 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5817 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5818 bp->autoneg = 0;
5819 bp->req_line_speed = bp->line_speed = SPEED_1000;
5820 bp->req_duplex = DUPLEX_FULL;
5821 }
5822 }
5823 else {
5824 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5825 }
5826
5827 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5828
5829 init_timer(&bp->timer);
5830 bp->timer.expires = RUN_AT(bp->timer_interval);
5831 bp->timer.data = (unsigned long) bp;
5832 bp->timer.function = bnx2_timer;
5833
5834 return 0;
5835
5836 err_out_unmap:
5837 if (bp->regview) {
5838 iounmap(bp->regview);
5839 bp->regview = NULL;
5840 }
5841
5842 err_out_release:
5843 pci_release_regions(pdev);
5844
5845 err_out_disable:
5846 pci_disable_device(pdev);
5847 pci_set_drvdata(pdev, NULL);
5848
5849 err_out:
5850 return rc;
5851 }
5852
5853 static int __devinit
5854 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5855 {
5856 static int version_printed = 0;
5857 struct net_device *dev = NULL;
5858 struct bnx2 *bp;
5859 int rc, i;
5860
5861 if (version_printed++ == 0)
5862 printk(KERN_INFO "%s", version);
5863
/* dev and its private area are zeroed by alloc_etherdev */
5865 dev = alloc_etherdev(sizeof(*bp));
5866
5867 if (!dev)
5868 return -ENOMEM;
5869
5870 rc = bnx2_init_board(pdev, dev);
5871 if (rc < 0) {
5872 free_netdev(dev);
5873 return rc;
5874 }
5875
5876 dev->open = bnx2_open;
5877 dev->hard_start_xmit = bnx2_start_xmit;
5878 dev->stop = bnx2_close;
5879 dev->get_stats = bnx2_get_stats;
5880 dev->set_multicast_list = bnx2_set_rx_mode;
5881 dev->do_ioctl = bnx2_ioctl;
5882 dev->set_mac_address = bnx2_change_mac_addr;
5883 dev->change_mtu = bnx2_change_mtu;
5884 dev->tx_timeout = bnx2_tx_timeout;
5885 dev->watchdog_timeo = TX_TIMEOUT;
5886 #ifdef BCM_VLAN
5887 dev->vlan_rx_register = bnx2_vlan_rx_register;
5888 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5889 #endif
5890 dev->poll = bnx2_poll;
5891 dev->ethtool_ops = &bnx2_ethtool_ops;
5892 dev->weight = 64;
5893
5894 bp = netdev_priv(dev);
5895
5896 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5897 dev->poll_controller = poll_bnx2;
5898 #endif
5899
5900 if ((rc = register_netdev(dev))) {
5901 dev_err(&pdev->dev, "Cannot register net device\n");
5902 if (bp->regview)
5903 iounmap(bp->regview);
5904 pci_release_regions(pdev);
5905 pci_disable_device(pdev);
5906 pci_set_drvdata(pdev, NULL);
5907 free_netdev(dev);
5908 return rc;
5909 }
5910
5911 pci_set_drvdata(pdev, dev);
5912
5913 memcpy(dev->dev_addr, bp->mac_addr, 6);
5914 memcpy(dev->perm_addr, bp->mac_addr, 6);
bp->name = board_info[ent->driver_data].name;
5916 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5917 "IRQ %d, ",
5918 dev->name,
5919 bp->name,
5920 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5921 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5922 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5923 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5924 bp->bus_speed_mhz,
5925 dev->base_addr,
5926 bp->pdev->irq);
5927
5928 printk("node addr ");
5929 for (i = 0; i < 6; i++)
5930 printk("%2.2x", dev->dev_addr[i]);
5931 printk("\n");
5932
5933 dev->features |= NETIF_F_SG;
5934 if (bp->flags & USING_DAC_FLAG)
5935 dev->features |= NETIF_F_HIGHDMA;
5936 dev->features |= NETIF_F_IP_CSUM;
5937 #ifdef BCM_VLAN
5938 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5939 #endif
5940 #ifdef BCM_TSO
5941 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5942 #endif
5943
5944 netif_carrier_off(bp->dev);
5945
5946 return 0;
5947 }
5948
5949 static void __devexit
5950 bnx2_remove_one(struct pci_dev *pdev)
5951 {
5952 struct net_device *dev = pci_get_drvdata(pdev);
5953 struct bnx2 *bp = netdev_priv(dev);
5954
5955 flush_scheduled_work();
5956
5957 unregister_netdev(dev);
5958
5959 if (bp->regview)
5960 iounmap(bp->regview);
5961
5962 free_netdev(dev);
5963 pci_release_regions(pdev);
5964 pci_disable_device(pdev);
5965 pci_set_drvdata(pdev, NULL);
5966 }
5967
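/* PM suspend: detach the interface, halt the chip with a reset code
 * that reflects the configured wake-on-LAN policy, and drop to the
 * PCI power state chosen by the core.
 */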
5968 static int
5969 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5970 {
5971 struct net_device *dev = pci_get_drvdata(pdev);
5972 struct bnx2 *bp = netdev_priv(dev);
5973 u32 reset_code;
5974
5975 if (!netif_running(dev))
5976 return 0;
5977
5978 flush_scheduled_work();
5979 bnx2_netif_stop(bp);
5980 netif_device_detach(dev);
5981 del_timer_sync(&bp->timer);
5982 if (bp->flags & NO_WOL_FLAG)
5983 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5984 else if (bp->wol)
5985 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5986 else
5987 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5988 bnx2_reset_chip(bp, reset_code);
5989 bnx2_free_skbs(bp);
5990 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5991 return 0;
5992 }
5993
5994 static int
5995 bnx2_resume(struct pci_dev *pdev)
5996 {
5997 struct net_device *dev = pci_get_drvdata(pdev);
5998 struct bnx2 *bp = netdev_priv(dev);
5999
6000 if (!netif_running(dev))
6001 return 0;
6002
6003 bnx2_set_power_state(bp, PCI_D0);
6004 netif_device_attach(dev);
6005 bnx2_init_nic(bp);
6006 bnx2_netif_start(bp);
6007 return 0;
6008 }
6009
6010 static struct pci_driver bnx2_pci_driver = {
6011 .name = DRV_MODULE_NAME,
6012 .id_table = bnx2_pci_tbl,
6013 .probe = bnx2_init_one,
6014 .remove = __devexit_p(bnx2_remove_one),
6015 .suspend = bnx2_suspend,
6016 .resume = bnx2_resume,
6017 };
6018
6019 static int __init bnx2_init(void)
6020 {
6021 return pci_module_init(&bnx2_pci_driver);
6022 }
6023
6024 static void __exit bnx2_cleanup(void)
6025 {
6026 pci_unregister_driver(&bnx2_pci_driver);
6027 }
6028
6029 module_init(bnx2_init);
6030 module_exit(bnx2_cleanup);
6031
6032
6033