1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2008 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE 0x10000
56
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.2"
60 #define DRV_MODULE_RELDATE "January 21, 2008"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
66
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
86 BCM5708,
87 BCM5708S,
88 BCM5709,
89 BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
93 static const struct {
94 char *name;
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 };
106
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126 { 0, }
127 };
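/* The driver_data field (last entry) in each PCI ID above holds a board_t
 * value, which serves as the index into board_info[] for the matching
 * device name.
 */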
128
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
133 /* Slow EEPROM */
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
216 };
217
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231 u32 diff;
232
233 smp_mb();
234
235 /* The ring uses 256 indices for 255 entries; one of them
236 * needs to be skipped.
237 */
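/* Illustration (assuming TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255, as
 * the comment above implies): tx_prod and tx_cons are 16-bit indices, so
 * the u32 difference computed below can wrap to a huge value when tx_cons
 * is numerically larger than tx_prod.  Masking with 0xffff recovers the
 * real 16-bit distance, and a completely full ring (diff == TX_DESC_CNT)
 * is reported as MAX_TX_DESC_CNT entries in use, since one index per ring
 * is skipped.
 */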
238 diff = bp->tx_prod - bnapi->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
244 return (bp->tx_ring_size - diff);
245 }
246
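/* Indirect register access: the target offset is first latched in
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and the data is then read or written
 * through BNX2_PCICFG_REG_WINDOW.  indirect_lock serializes the two-step
 * sequence.
 */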
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
257 }
258
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
266 }
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271 offset += cid_addr;
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
290 spin_unlock_bh(&bp->indirect_lock);
291 }
292
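/* MDIO (MII management) access through the EMAC.  The command word packs
 * the PHY address (<< 21) and register number (<< 16) together with the
 * read/write opcode and START_BUSY; read data is returned in the low bits
 * of BNX2_EMAC_MDIO_COMM.  Hardware auto-polling is temporarily turned off
 * around the access when BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING is set.
 */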
293 static int
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295 {
296 u32 val1;
297 int i, ret;
298
299 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306 udelay(40);
307 }
308
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314 for (i = 0; i < 50; i++) {
315 udelay(10);
316
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319 udelay(5);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329 *val = 0x0;
330 ret = -EBUSY;
331 }
332 else {
333 *val = val1;
334 ret = 0;
335 }
336
337 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344 udelay(40);
345 }
346
347 return ret;
348 }
349
350 static int
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352 {
353 u32 val1;
354 int i, ret;
355
356 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363 udelay(40);
364 }
365
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
370
371 for (i = 0; i < 50; i++) {
372 udelay(10);
373
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376 udelay(5);
377 break;
378 }
379 }
380
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382 ret = -EBUSY;
383 else
384 ret = 0;
385
386 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393 udelay(40);
394 }
395
396 return ret;
397 }
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402 int i;
403 struct bnx2_napi *bnapi;
404
405 for (i = 0; i < bp->irq_nvecs; i++) {
406 bnapi = &bp->bnx2_napi[i];
407 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409 }
410 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
413 static void
414 bnx2_enable_int(struct bnx2 *bp)
415 {
416 int i;
417 struct bnx2_napi *bnapi;
418
419 for (i = 0; i < bp->irq_nvecs; i++) {
420 bnapi = &bp->bnx2_napi[i];
421
422 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
423 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
424 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
425 bnapi->last_status_idx);
426
427 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
429 bnapi->last_status_idx);
430 }
431 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
432 }
433
434 static void
435 bnx2_disable_int_sync(struct bnx2 *bp)
436 {
437 int i;
438
439 atomic_inc(&bp->intr_sem);
440 bnx2_disable_int(bp);
441 for (i = 0; i < bp->irq_nvecs; i++)
442 synchronize_irq(bp->irq_tbl[i].vector);
443 }
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448 int i;
449
450 for (i = 0; i < bp->irq_nvecs; i++)
451 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457 int i;
458
459 for (i = 0; i < bp->irq_nvecs; i++)
460 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
463 static void
464 bnx2_netif_stop(struct bnx2 *bp)
465 {
466 bnx2_disable_int_sync(bp);
467 if (netif_running(bp->dev)) {
468 bnx2_napi_disable(bp);
469 netif_tx_disable(bp->dev);
470 bp->dev->trans_start = jiffies; /* prevent tx timeout */
471 }
472 }
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477 if (atomic_dec_and_test(&bp->intr_sem)) {
478 if (netif_running(bp->dev)) {
479 netif_wake_queue(bp->dev);
480 bnx2_napi_enable(bp);
481 bnx2_enable_int(bp);
482 }
483 }
484 }
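/* intr_sem acts as a nesting count: bnx2_disable_int_sync() increments it
 * before masking interrupts, and bnx2_netif_start() only re-enables NAPI
 * and interrupts when the matching decrement brings the count back to
 * zero, so nested stop/start pairs remain balanced.
 */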
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489 int i;
490
491 for (i = 0; i < bp->ctx_pages; i++) {
492 if (bp->ctx_blk[i]) {
493 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494 bp->ctx_blk[i],
495 bp->ctx_blk_mapping[i]);
496 bp->ctx_blk[i] = NULL;
497 }
498 }
499 if (bp->status_blk) {
500 pci_free_consistent(bp->pdev, bp->status_stats_size,
501 bp->status_blk, bp->status_blk_mapping);
502 bp->status_blk = NULL;
503 bp->stats_blk = NULL;
504 }
505 if (bp->tx_desc_ring) {
506 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507 bp->tx_desc_ring, bp->tx_desc_mapping);
508 bp->tx_desc_ring = NULL;
509 }
510 kfree(bp->tx_buf_ring);
511 bp->tx_buf_ring = NULL;
512 for (i = 0; i < bp->rx_max_ring; i++) {
513 if (bp->rx_desc_ring[i])
514 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515 bp->rx_desc_ring[i],
516 bp->rx_desc_mapping[i]);
517 bp->rx_desc_ring[i] = NULL;
518 }
519 vfree(bp->rx_buf_ring);
520 bp->rx_buf_ring = NULL;
521 for (i = 0; i < bp->rx_max_pg_ring; i++) {
522 if (bp->rx_pg_desc_ring[i])
523 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524 bp->rx_pg_desc_ring[i],
525 bp->rx_pg_desc_mapping[i]);
526 bp->rx_pg_desc_ring[i] = NULL;
527 }
528 if (bp->rx_pg_ring)
529 vfree(bp->rx_pg_ring);
530 bp->rx_pg_ring = NULL;
531 }
532
533 static int
534 bnx2_alloc_mem(struct bnx2 *bp)
535 {
536 int i, status_blk_size;
537
538 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
539 if (bp->tx_buf_ring == NULL)
540 return -ENOMEM;
541
542 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
543 &bp->tx_desc_mapping);
544 if (bp->tx_desc_ring == NULL)
545 goto alloc_mem_err;
546
547 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
548 if (bp->rx_buf_ring == NULL)
549 goto alloc_mem_err;
550
551 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
552
553 for (i = 0; i < bp->rx_max_ring; i++) {
554 bp->rx_desc_ring[i] =
555 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
556 &bp->rx_desc_mapping[i]);
557 if (bp->rx_desc_ring[i] == NULL)
558 goto alloc_mem_err;
559
560 }
561
562 if (bp->rx_pg_ring_size) {
563 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
564 bp->rx_max_pg_ring);
565 if (bp->rx_pg_ring == NULL)
566 goto alloc_mem_err;
567
568 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
569 bp->rx_max_pg_ring);
570 }
571
572 for (i = 0; i < bp->rx_max_pg_ring; i++) {
573 bp->rx_pg_desc_ring[i] =
574 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
575 &bp->rx_pg_desc_mapping[i]);
576 if (bp->rx_pg_desc_ring[i] == NULL)
577 goto alloc_mem_err;
578
579 }
580
581 /* Combine status and statistics blocks into one allocation. */
582 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
583 if (bp->flags & BNX2_FLAG_MSIX_CAP)
584 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
585 BNX2_SBLK_MSIX_ALIGN_SIZE);
586 bp->status_stats_size = status_blk_size +
587 sizeof(struct statistics_block);
588
589 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
590 &bp->status_blk_mapping);
591 if (bp->status_blk == NULL)
592 goto alloc_mem_err;
593
594 memset(bp->status_blk, 0, bp->status_stats_size);
595
596 bp->bnx2_napi[0].status_blk = bp->status_blk;
597 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
598 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
599 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
600
601 bnapi->status_blk_msix = (void *)
602 ((unsigned long) bp->status_blk +
603 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
604 bnapi->int_num = i << 24;
605 }
606 }
607
608 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
609 status_blk_size);
610
611 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
612
613 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
614 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
615 if (bp->ctx_pages == 0)
616 bp->ctx_pages = 1;
617 for (i = 0; i < bp->ctx_pages; i++) {
618 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
619 BCM_PAGE_SIZE,
620 &bp->ctx_blk_mapping[i]);
621 if (bp->ctx_blk[i] == NULL)
622 goto alloc_mem_err;
623 }
624 }
625 return 0;
626
627 alloc_mem_err:
628 bnx2_free_mem(bp);
629 return -ENOMEM;
630 }
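/* Layout of the combined allocation made above (one pci_alloc_consistent
 * of status_stats_size bytes):
 *
 *   status_blk -> status block(s), L1-cache aligned; with MSI-X, one block
 *                 per vector spaced BNX2_SBLK_MSIX_ALIGN_SIZE apart
 *   stats_blk  -> statistics block, placed status_blk_size bytes after
 *                 status_blk (the same offset is applied to the DMA mapping)
 */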
631
632 static void
633 bnx2_report_fw_link(struct bnx2 *bp)
634 {
635 u32 fw_link_status = 0;
636
637 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
638 return;
639
640 if (bp->link_up) {
641 u32 bmsr;
642
643 switch (bp->line_speed) {
644 case SPEED_10:
645 if (bp->duplex == DUPLEX_HALF)
646 fw_link_status = BNX2_LINK_STATUS_10HALF;
647 else
648 fw_link_status = BNX2_LINK_STATUS_10FULL;
649 break;
650 case SPEED_100:
651 if (bp->duplex == DUPLEX_HALF)
652 fw_link_status = BNX2_LINK_STATUS_100HALF;
653 else
654 fw_link_status = BNX2_LINK_STATUS_100FULL;
655 break;
656 case SPEED_1000:
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_1000HALF;
659 else
660 fw_link_status = BNX2_LINK_STATUS_1000FULL;
661 break;
662 case SPEED_2500:
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_2500HALF;
665 else
666 fw_link_status = BNX2_LINK_STATUS_2500FULL;
667 break;
668 }
669
670 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
671
672 if (bp->autoneg) {
673 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
674
675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
676 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
677
678 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
679 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
680 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
681 else
682 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
683 }
684 }
685 else
686 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
687
688 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
689 }
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
696 "Copper"));
697 }
698
699 static void
700 bnx2_report_link(struct bnx2 *bp)
701 {
702 if (bp->link_up) {
703 netif_carrier_on(bp->dev);
704 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
705 bnx2_xceiver_str(bp));
706
707 printk("%d Mbps ", bp->line_speed);
708
709 if (bp->duplex == DUPLEX_FULL)
710 printk("full duplex");
711 else
712 printk("half duplex");
713
714 if (bp->flow_ctrl) {
715 if (bp->flow_ctrl & FLOW_CTRL_RX) {
716 printk(", receive ");
717 if (bp->flow_ctrl & FLOW_CTRL_TX)
718 printk("& transmit ");
719 }
720 else {
721 printk(", transmit ");
722 }
723 printk("flow control ON");
724 }
725 printk("\n");
726 }
727 else {
728 netif_carrier_off(bp->dev);
729 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
730 bnx2_xceiver_str(bp));
731 }
732
733 bnx2_report_fw_link(bp);
734 }
735
736 static void
737 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
738 {
739 u32 local_adv, remote_adv;
740
741 bp->flow_ctrl = 0;
742 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
743 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
744
745 if (bp->duplex == DUPLEX_FULL) {
746 bp->flow_ctrl = bp->req_flow_ctrl;
747 }
748 return;
749 }
750
751 if (bp->duplex != DUPLEX_FULL) {
752 return;
753 }
754
755 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
756 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
757 u32 val;
758
759 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
760 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
761 bp->flow_ctrl |= FLOW_CTRL_TX;
762 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
763 bp->flow_ctrl |= FLOW_CTRL_RX;
764 return;
765 }
766
767 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
768 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
769
770 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
771 u32 new_local_adv = 0;
772 u32 new_remote_adv = 0;
773
774 if (local_adv & ADVERTISE_1000XPAUSE)
775 new_local_adv |= ADVERTISE_PAUSE_CAP;
776 if (local_adv & ADVERTISE_1000XPSE_ASYM)
777 new_local_adv |= ADVERTISE_PAUSE_ASYM;
778 if (remote_adv & ADVERTISE_1000XPAUSE)
779 new_remote_adv |= ADVERTISE_PAUSE_CAP;
780 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
781 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
782
783 local_adv = new_local_adv;
784 remote_adv = new_remote_adv;
785 }
786
787 /* See Table 28B-3 of 802.3ab-1999 spec. */
788 if (local_adv & ADVERTISE_PAUSE_CAP) {
789 if(local_adv & ADVERTISE_PAUSE_ASYM) {
790 if (remote_adv & ADVERTISE_PAUSE_CAP) {
791 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
792 }
793 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
794 bp->flow_ctrl = FLOW_CTRL_RX;
795 }
796 }
797 else {
798 if (remote_adv & ADVERTISE_PAUSE_CAP) {
799 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
800 }
801 }
802 }
803 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
804 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
805 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
806
807 bp->flow_ctrl = FLOW_CTRL_TX;
808 }
809 }
810 }
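/* Summary of the pause resolution implemented above (802.3 Table 28B-3),
 * after any 1000X (SerDes) advertisement bits have been mapped to the
 * copper PAUSE bits:
 *
 *   local CAP        + remote CAP          -> TX and RX pause
 *   local CAP + ASYM + remote ASYM only    -> RX pause only
 *   local ASYM only  + remote CAP + ASYM   -> TX pause only
 *   anything else                          -> no pause
 */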
811
812 static int
813 bnx2_5709s_linkup(struct bnx2 *bp)
814 {
815 u32 val, speed;
816
817 bp->link_up = 1;
818
819 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
822
823 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824 bp->line_speed = bp->req_line_speed;
825 bp->duplex = bp->req_duplex;
826 return 0;
827 }
828 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
829 switch (speed) {
830 case MII_BNX2_GP_TOP_AN_SPEED_10:
831 bp->line_speed = SPEED_10;
832 break;
833 case MII_BNX2_GP_TOP_AN_SPEED_100:
834 bp->line_speed = SPEED_100;
835 break;
836 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838 bp->line_speed = SPEED_1000;
839 break;
840 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841 bp->line_speed = SPEED_2500;
842 break;
843 }
844 if (val & MII_BNX2_GP_TOP_AN_FD)
845 bp->duplex = DUPLEX_FULL;
846 else
847 bp->duplex = DUPLEX_HALF;
848 return 0;
849 }
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854 u32 val;
855
856 bp->link_up = 1;
857 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859 case BCM5708S_1000X_STAT1_SPEED_10:
860 bp->line_speed = SPEED_10;
861 break;
862 case BCM5708S_1000X_STAT1_SPEED_100:
863 bp->line_speed = SPEED_100;
864 break;
865 case BCM5708S_1000X_STAT1_SPEED_1G:
866 bp->line_speed = SPEED_1000;
867 break;
868 case BCM5708S_1000X_STAT1_SPEED_2G5:
869 bp->line_speed = SPEED_2500;
870 break;
871 }
872 if (val & BCM5708S_1000X_STAT1_FD)
873 bp->duplex = DUPLEX_FULL;
874 else
875 bp->duplex = DUPLEX_HALF;
876
877 return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883 u32 bmcr, local_adv, remote_adv, common;
884
885 bp->link_up = 1;
886 bp->line_speed = SPEED_1000;
887
888 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889 if (bmcr & BMCR_FULLDPLX) {
890 bp->duplex = DUPLEX_FULL;
891 }
892 else {
893 bp->duplex = DUPLEX_HALF;
894 }
895
896 if (!(bmcr & BMCR_ANENABLE)) {
897 return 0;
898 }
899
900 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903 common = local_adv & remote_adv;
904 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906 if (common & ADVERTISE_1000XFULL) {
907 bp->duplex = DUPLEX_FULL;
908 }
909 else {
910 bp->duplex = DUPLEX_HALF;
911 }
912 }
913
914 return 0;
915 }
916
917 static int
918 bnx2_copper_linkup(struct bnx2 *bp)
919 {
920 u32 bmcr;
921
922 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923 if (bmcr & BMCR_ANENABLE) {
924 u32 local_adv, remote_adv, common;
925
926 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
928
929 common = local_adv & (remote_adv >> 2);
930 if (common & ADVERTISE_1000FULL) {
931 bp->line_speed = SPEED_1000;
932 bp->duplex = DUPLEX_FULL;
933 }
934 else if (common & ADVERTISE_1000HALF) {
935 bp->line_speed = SPEED_1000;
936 bp->duplex = DUPLEX_HALF;
937 }
938 else {
939 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
941
942 common = local_adv & remote_adv;
943 if (common & ADVERTISE_100FULL) {
944 bp->line_speed = SPEED_100;
945 bp->duplex = DUPLEX_FULL;
946 }
947 else if (common & ADVERTISE_100HALF) {
948 bp->line_speed = SPEED_100;
949 bp->duplex = DUPLEX_HALF;
950 }
951 else if (common & ADVERTISE_10FULL) {
952 bp->line_speed = SPEED_10;
953 bp->duplex = DUPLEX_FULL;
954 }
955 else if (common & ADVERTISE_10HALF) {
956 bp->line_speed = SPEED_10;
957 bp->duplex = DUPLEX_HALF;
958 }
959 else {
960 bp->line_speed = 0;
961 bp->link_up = 0;
962 }
963 }
964 }
965 else {
966 if (bmcr & BMCR_SPEED100) {
967 bp->line_speed = SPEED_100;
968 }
969 else {
970 bp->line_speed = SPEED_10;
971 }
972 if (bmcr & BMCR_FULLDPLX) {
973 bp->duplex = DUPLEX_FULL;
974 }
975 else {
976 bp->duplex = DUPLEX_HALF;
977 }
978 }
979
980 return 0;
981 }
982
983 static int
984 bnx2_set_mac_link(struct bnx2 *bp)
985 {
986 u32 val;
987
988 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
989 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
990 (bp->duplex == DUPLEX_HALF)) {
991 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
992 }
993
994 /* Configure the EMAC mode register. */
995 val = REG_RD(bp, BNX2_EMAC_MODE);
996
997 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
998 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
999 BNX2_EMAC_MODE_25G_MODE);
1000
1001 if (bp->link_up) {
1002 switch (bp->line_speed) {
1003 case SPEED_10:
1004 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1005 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1006 break;
1007 }
1008 /* fall through */
1009 case SPEED_100:
1010 val |= BNX2_EMAC_MODE_PORT_MII;
1011 break;
1012 case SPEED_2500:
1013 val |= BNX2_EMAC_MODE_25G_MODE;
1014 /* fall through */
1015 case SPEED_1000:
1016 val |= BNX2_EMAC_MODE_PORT_GMII;
1017 break;
1018 }
1019 }
1020 else {
1021 val |= BNX2_EMAC_MODE_PORT_GMII;
1022 }
1023
1024 /* Set the MAC to operate in the appropriate duplex mode. */
1025 if (bp->duplex == DUPLEX_HALF)
1026 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1027 REG_WR(bp, BNX2_EMAC_MODE, val);
1028
1029 /* Enable/disable rx PAUSE. */
1030 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1031
1032 if (bp->flow_ctrl & FLOW_CTRL_RX)
1033 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1034 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1035
1036 /* Enable/disable tx PAUSE. */
1037 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1038 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1039
1040 if (bp->flow_ctrl & FLOW_CTRL_TX)
1041 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1042 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1043
1044 /* Acknowledge the interrupt. */
1045 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1046
1047 return 0;
1048 }
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1054 (CHIP_NUM(bp) == CHIP_NUM_5709))
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1063 (CHIP_NUM(bp) == CHIP_NUM_5709))
1064 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071 u32 up1;
1072 int ret = 1;
1073
1074 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1075 return 0;
1076
1077 if (bp->autoneg & AUTONEG_SPEED)
1078 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083 bnx2_read_phy(bp, bp->mii_up1, &up1);
1084 if (!(up1 & BCM5708S_UP1_2G5)) {
1085 up1 |= BCM5708S_UP1_2G5;
1086 bnx2_write_phy(bp, bp->mii_up1, up1);
1087 ret = 0;
1088 }
1089
1090 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094 return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100 u32 up1;
1101 int ret = 0;
1102
1103 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1104 return 0;
1105
1106 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109 bnx2_read_phy(bp, bp->mii_up1, &up1);
1110 if (up1 & BCM5708S_UP1_2G5) {
1111 up1 &= ~BCM5708S_UP1_2G5;
1112 bnx2_write_phy(bp, bp->mii_up1, up1);
1113 ret = 1;
1114 }
1115
1116 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120 return ret;
1121 }
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126 u32 bmcr;
1127
1128 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1129 return;
1130
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132 u32 val;
1133
1134 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135 MII_BNX2_BLK_ADDR_SERDES_DIG);
1136 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148 }
1149
1150 if (bp->autoneg & AUTONEG_SPEED) {
1151 bmcr &= ~BMCR_ANENABLE;
1152 if (bp->req_duplex == DUPLEX_FULL)
1153 bmcr |= BMCR_FULLDPLX;
1154 }
1155 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161 u32 bmcr;
1162
1163 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1164 return;
1165
1166 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167 u32 val;
1168
1169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170 MII_BNX2_BLK_ADDR_SERDES_DIG);
1171 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182 }
1183
1184 if (bp->autoneg & AUTONEG_SPEED)
1185 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
1189 static void
1190 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1191 {
1192 u32 val;
1193
1194 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1195 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1196 if (start)
1197 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1198 else
1199 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1200 }
1201
1202 static int
1203 bnx2_set_link(struct bnx2 *bp)
1204 {
1205 u32 bmsr;
1206 u8 link_up;
1207
1208 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1209 bp->link_up = 1;
1210 return 0;
1211 }
1212
1213 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1214 return 0;
1215
1216 link_up = bp->link_up;
1217
1218 bnx2_enable_bmsr1(bp);
1219 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1220 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1221 bnx2_disable_bmsr1(bp);
1222
1223 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1224 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1225 u32 val;
1226
1227 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1228 bnx2_5706s_force_link_dn(bp, 0);
1229 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1230 }
1231 val = REG_RD(bp, BNX2_EMAC_STATUS);
1232 if (val & BNX2_EMAC_STATUS_LINK)
1233 bmsr |= BMSR_LSTATUS;
1234 else
1235 bmsr &= ~BMSR_LSTATUS;
1236 }
1237
1238 if (bmsr & BMSR_LSTATUS) {
1239 bp->link_up = 1;
1240
1241 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1242 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1243 bnx2_5706s_linkup(bp);
1244 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1245 bnx2_5708s_linkup(bp);
1246 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1247 bnx2_5709s_linkup(bp);
1248 }
1249 else {
1250 bnx2_copper_linkup(bp);
1251 }
1252 bnx2_resolve_flow_ctrl(bp);
1253 }
1254 else {
1255 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1256 (bp->autoneg & AUTONEG_SPEED))
1257 bnx2_disable_forced_2g5(bp);
1258
1259 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1260 u32 bmcr;
1261
1262 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1263 bmcr |= BMCR_ANENABLE;
1264 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1265
1266 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1267 }
1268 bp->link_up = 0;
1269 }
1270
1271 if (bp->link_up != link_up) {
1272 bnx2_report_link(bp);
1273 }
1274
1275 bnx2_set_mac_link(bp);
1276
1277 return 0;
1278 }
1279
1280 static int
1281 bnx2_reset_phy(struct bnx2 *bp)
1282 {
1283 int i;
1284 u32 reg;
1285
1286 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1287
1288 #define PHY_RESET_MAX_WAIT 100
1289 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1290 udelay(10);
1291
1292 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1293 if (!(reg & BMCR_RESET)) {
1294 udelay(20);
1295 break;
1296 }
1297 }
1298 if (i == PHY_RESET_MAX_WAIT) {
1299 return -EBUSY;
1300 }
1301 return 0;
1302 }
1303
1304 static u32
1305 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1306 {
1307 u32 adv = 0;
1308
1309 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1310 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1311
1312 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1313 adv = ADVERTISE_1000XPAUSE;
1314 }
1315 else {
1316 adv = ADVERTISE_PAUSE_CAP;
1317 }
1318 }
1319 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1320 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1321 adv = ADVERTISE_1000XPSE_ASYM;
1322 }
1323 else {
1324 adv = ADVERTISE_PAUSE_ASYM;
1325 }
1326 }
1327 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1328 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1329 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1330 }
1331 else {
1332 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1333 }
1334 }
1335 return adv;
1336 }
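/* In short: RX and TX flow control requested -> advertise symmetric PAUSE;
 * TX only -> advertise asymmetric PAUSE only; RX only -> advertise both
 * PAUSE bits.  The 1000X (SerDes) or copper encoding is selected by
 * BNX2_PHY_FLAG_SERDES.
 */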
1337
1338 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1339
1340 static int
1341 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1342 {
1343 u32 speed_arg = 0, pause_adv;
1344
1345 pause_adv = bnx2_phy_get_pause_adv(bp);
1346
1347 if (bp->autoneg & AUTONEG_SPEED) {
1348 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1349 if (bp->advertising & ADVERTISED_10baseT_Half)
1350 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351 if (bp->advertising & ADVERTISED_10baseT_Full)
1352 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1353 if (bp->advertising & ADVERTISED_100baseT_Half)
1354 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1355 if (bp->advertising & ADVERTISED_100baseT_Full)
1356 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1357 if (bp->advertising & ADVERTISED_1000baseT_Full)
1358 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1359 if (bp->advertising & ADVERTISED_2500baseX_Full)
1360 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1361 } else {
1362 if (bp->req_line_speed == SPEED_2500)
1363 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1364 else if (bp->req_line_speed == SPEED_1000)
1365 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1366 else if (bp->req_line_speed == SPEED_100) {
1367 if (bp->req_duplex == DUPLEX_FULL)
1368 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1369 else
1370 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1371 } else if (bp->req_line_speed == SPEED_10) {
1372 if (bp->req_duplex == DUPLEX_FULL)
1373 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1374 else
1375 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1376 }
1377 }
1378
1379 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1380 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1381 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1382 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1383
1384 if (port == PORT_TP)
1385 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1386 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1387
1388 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1389
1390 spin_unlock_bh(&bp->phy_lock);
1391 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1392 spin_lock_bh(&bp->phy_lock);
1393
1394 return 0;
1395 }
1396
1397 static int
1398 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1399 {
1400 u32 adv, bmcr;
1401 u32 new_adv = 0;
1402
1403 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1404 return (bnx2_setup_remote_phy(bp, port));
1405
1406 if (!(bp->autoneg & AUTONEG_SPEED)) {
1407 u32 new_bmcr;
1408 int force_link_down = 0;
1409
1410 if (bp->req_line_speed == SPEED_2500) {
1411 if (!bnx2_test_and_enable_2g5(bp))
1412 force_link_down = 1;
1413 } else if (bp->req_line_speed == SPEED_1000) {
1414 if (bnx2_test_and_disable_2g5(bp))
1415 force_link_down = 1;
1416 }
1417 bnx2_read_phy(bp, bp->mii_adv, &adv);
1418 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1419
1420 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1421 new_bmcr = bmcr & ~BMCR_ANENABLE;
1422 new_bmcr |= BMCR_SPEED1000;
1423
1424 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1425 if (bp->req_line_speed == SPEED_2500)
1426 bnx2_enable_forced_2g5(bp);
1427 else if (bp->req_line_speed == SPEED_1000) {
1428 bnx2_disable_forced_2g5(bp);
1429 new_bmcr &= ~0x2000;
1430 }
1431
1432 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1433 if (bp->req_line_speed == SPEED_2500)
1434 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1435 else
1436 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1437 }
1438
1439 if (bp->req_duplex == DUPLEX_FULL) {
1440 adv |= ADVERTISE_1000XFULL;
1441 new_bmcr |= BMCR_FULLDPLX;
1442 }
1443 else {
1444 adv |= ADVERTISE_1000XHALF;
1445 new_bmcr &= ~BMCR_FULLDPLX;
1446 }
1447 if ((new_bmcr != bmcr) || (force_link_down)) {
1448 /* Force a link down visible on the other side */
1449 if (bp->link_up) {
1450 bnx2_write_phy(bp, bp->mii_adv, adv &
1451 ~(ADVERTISE_1000XFULL |
1452 ADVERTISE_1000XHALF));
1453 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1454 BMCR_ANRESTART | BMCR_ANENABLE);
1455
1456 bp->link_up = 0;
1457 netif_carrier_off(bp->dev);
1458 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1459 bnx2_report_link(bp);
1460 }
1461 bnx2_write_phy(bp, bp->mii_adv, adv);
1462 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1463 } else {
1464 bnx2_resolve_flow_ctrl(bp);
1465 bnx2_set_mac_link(bp);
1466 }
1467 return 0;
1468 }
1469
1470 bnx2_test_and_enable_2g5(bp);
1471
1472 if (bp->advertising & ADVERTISED_1000baseT_Full)
1473 new_adv |= ADVERTISE_1000XFULL;
1474
1475 new_adv |= bnx2_phy_get_pause_adv(bp);
1476
1477 bnx2_read_phy(bp, bp->mii_adv, &adv);
1478 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1479
1480 bp->serdes_an_pending = 0;
1481 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1482 /* Force a link down visible on the other side */
1483 if (bp->link_up) {
1484 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1485 spin_unlock_bh(&bp->phy_lock);
1486 msleep(20);
1487 spin_lock_bh(&bp->phy_lock);
1488 }
1489
1490 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1491 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1492 BMCR_ANENABLE);
1493 /* Speed up link-up time when the link partner
1494 * does not autonegotiate, which is very common
1495 * in blade servers. Some blade servers use
1496 * IPMI for keyboard input and it's important
1497 * to minimize link disruptions. Autoneg. involves
1498 * exchanging base pages plus 3 next pages and
1499 * normally completes in about 120 msec.
1500 */
1501 bp->current_interval = SERDES_AN_TIMEOUT;
1502 bp->serdes_an_pending = 1;
1503 mod_timer(&bp->timer, jiffies + bp->current_interval);
1504 } else {
1505 bnx2_resolve_flow_ctrl(bp);
1506 bnx2_set_mac_link(bp);
1507 }
1508
1509 return 0;
1510 }
1511
1512 #define ETHTOOL_ALL_FIBRE_SPEED \
1513 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1514 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1515 (ADVERTISED_1000baseT_Full)
1516
1517 #define ETHTOOL_ALL_COPPER_SPEED \
1518 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1519 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1520 ADVERTISED_1000baseT_Full)
1521
1522 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1523 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1524
1525 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1526
1527 static void
1528 bnx2_set_default_remote_link(struct bnx2 *bp)
1529 {
1530 u32 link;
1531
1532 if (bp->phy_port == PORT_TP)
1533 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1534 else
1535 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1536
1537 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1538 bp->req_line_speed = 0;
1539 bp->autoneg |= AUTONEG_SPEED;
1540 bp->advertising = ADVERTISED_Autoneg;
1541 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1542 bp->advertising |= ADVERTISED_10baseT_Half;
1543 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1544 bp->advertising |= ADVERTISED_10baseT_Full;
1545 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1546 bp->advertising |= ADVERTISED_100baseT_Half;
1547 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1548 bp->advertising |= ADVERTISED_100baseT_Full;
1549 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1550 bp->advertising |= ADVERTISED_1000baseT_Full;
1551 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1552 bp->advertising |= ADVERTISED_2500baseX_Full;
1553 } else {
1554 bp->autoneg = 0;
1555 bp->advertising = 0;
1556 bp->req_duplex = DUPLEX_FULL;
1557 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1558 bp->req_line_speed = SPEED_10;
1559 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1560 bp->req_duplex = DUPLEX_HALF;
1561 }
1562 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1563 bp->req_line_speed = SPEED_100;
1564 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1565 bp->req_duplex = DUPLEX_HALF;
1566 }
1567 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1568 bp->req_line_speed = SPEED_1000;
1569 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1570 bp->req_line_speed = SPEED_2500;
1571 }
1572 }
1573
1574 static void
1575 bnx2_set_default_link(struct bnx2 *bp)
1576 {
1577 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1578 return bnx2_set_default_remote_link(bp);
1579
1580 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1581 bp->req_line_speed = 0;
1582 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1583 u32 reg;
1584
1585 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1586
1587 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1588 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1589 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1590 bp->autoneg = 0;
1591 bp->req_line_speed = bp->line_speed = SPEED_1000;
1592 bp->req_duplex = DUPLEX_FULL;
1593 }
1594 } else
1595 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1596 }
1597
1598 static void
1599 bnx2_send_heart_beat(struct bnx2 *bp)
1600 {
1601 u32 msg;
1602 u32 addr;
1603
1604 spin_lock(&bp->indirect_lock);
1605 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1606 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1607 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1608 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1609 spin_unlock(&bp->indirect_lock);
1610 }
1611
1612 static void
1613 bnx2_remote_phy_event(struct bnx2 *bp)
1614 {
1615 u32 msg;
1616 u8 link_up = bp->link_up;
1617 u8 old_port;
1618
1619 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1620
1621 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1622 bnx2_send_heart_beat(bp);
1623
1624 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1625
1626 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1627 bp->link_up = 0;
1628 else {
1629 u32 speed;
1630
1631 bp->link_up = 1;
1632 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1633 bp->duplex = DUPLEX_FULL;
1634 switch (speed) {
1635 case BNX2_LINK_STATUS_10HALF:
1636 bp->duplex = DUPLEX_HALF;
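/* fall through */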
1637 case BNX2_LINK_STATUS_10FULL:
1638 bp->line_speed = SPEED_10;
1639 break;
1640 case BNX2_LINK_STATUS_100HALF:
1641 bp->duplex = DUPLEX_HALF;
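/* fall through */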
1642 case BNX2_LINK_STATUS_100BASE_T4:
1643 case BNX2_LINK_STATUS_100FULL:
1644 bp->line_speed = SPEED_100;
1645 break;
1646 case BNX2_LINK_STATUS_1000HALF:
1647 bp->duplex = DUPLEX_HALF;
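/* fall through */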
1648 case BNX2_LINK_STATUS_1000FULL:
1649 bp->line_speed = SPEED_1000;
1650 break;
1651 case BNX2_LINK_STATUS_2500HALF:
1652 bp->duplex = DUPLEX_HALF;
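/* fall through */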
1653 case BNX2_LINK_STATUS_2500FULL:
1654 bp->line_speed = SPEED_2500;
1655 break;
1656 default:
1657 bp->line_speed = 0;
1658 break;
1659 }
1660
1661 spin_lock(&bp->phy_lock);
1662 bp->flow_ctrl = 0;
1663 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1664 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1665 if (bp->duplex == DUPLEX_FULL)
1666 bp->flow_ctrl = bp->req_flow_ctrl;
1667 } else {
1668 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1669 bp->flow_ctrl |= FLOW_CTRL_TX;
1670 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1671 bp->flow_ctrl |= FLOW_CTRL_RX;
1672 }
1673
1674 old_port = bp->phy_port;
1675 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1676 bp->phy_port = PORT_FIBRE;
1677 else
1678 bp->phy_port = PORT_TP;
1679
1680 if (old_port != bp->phy_port)
1681 bnx2_set_default_link(bp);
1682
1683 spin_unlock(&bp->phy_lock);
1684 }
1685 if (bp->link_up != link_up)
1686 bnx2_report_link(bp);
1687
1688 bnx2_set_mac_link(bp);
1689 }
1690
1691 static int
1692 bnx2_set_remote_link(struct bnx2 *bp)
1693 {
1694 u32 evt_code;
1695
1696 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1697 switch (evt_code) {
1698 case BNX2_FW_EVT_CODE_LINK_EVENT:
1699 bnx2_remote_phy_event(bp);
1700 break;
1701 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1702 default:
1703 bnx2_send_heart_beat(bp);
1704 break;
1705 }
1706 return 0;
1707 }
1708
1709 static int
1710 bnx2_setup_copper_phy(struct bnx2 *bp)
1711 {
1712 u32 bmcr;
1713 u32 new_bmcr;
1714
1715 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1716
1717 if (bp->autoneg & AUTONEG_SPEED) {
1718 u32 adv_reg, adv1000_reg;
1719 u32 new_adv_reg = 0;
1720 u32 new_adv1000_reg = 0;
1721
1722 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1723 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1724 ADVERTISE_PAUSE_ASYM);
1725
1726 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1727 adv1000_reg &= PHY_ALL_1000_SPEED;
1728
1729 if (bp->advertising & ADVERTISED_10baseT_Half)
1730 new_adv_reg |= ADVERTISE_10HALF;
1731 if (bp->advertising & ADVERTISED_10baseT_Full)
1732 new_adv_reg |= ADVERTISE_10FULL;
1733 if (bp->advertising & ADVERTISED_100baseT_Half)
1734 new_adv_reg |= ADVERTISE_100HALF;
1735 if (bp->advertising & ADVERTISED_100baseT_Full)
1736 new_adv_reg |= ADVERTISE_100FULL;
1737 if (bp->advertising & ADVERTISED_1000baseT_Full)
1738 new_adv1000_reg |= ADVERTISE_1000FULL;
1739
1740 new_adv_reg |= ADVERTISE_CSMA;
1741
1742 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1743
1744 if ((adv1000_reg != new_adv1000_reg) ||
1745 (adv_reg != new_adv_reg) ||
1746 ((bmcr & BMCR_ANENABLE) == 0)) {
1747
1748 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1749 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1750 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1751 BMCR_ANENABLE);
1752 }
1753 else if (bp->link_up) {
1754 /* Flow ctrl may have changed from auto to forced */
1755 /* or vice-versa. */
1756
1757 bnx2_resolve_flow_ctrl(bp);
1758 bnx2_set_mac_link(bp);
1759 }
1760 return 0;
1761 }
1762
1763 new_bmcr = 0;
1764 if (bp->req_line_speed == SPEED_100) {
1765 new_bmcr |= BMCR_SPEED100;
1766 }
1767 if (bp->req_duplex == DUPLEX_FULL) {
1768 new_bmcr |= BMCR_FULLDPLX;
1769 }
1770 if (new_bmcr != bmcr) {
1771 u32 bmsr;
1772
1773 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1774 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1775
1776 if (bmsr & BMSR_LSTATUS) {
1777 /* Force link down */
1778 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1779 spin_unlock_bh(&bp->phy_lock);
1780 msleep(50);
1781 spin_lock_bh(&bp->phy_lock);
1782
1783 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1784 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1785 }
1786
1787 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1788
1789 /* Normally, the new speed is setup after the link has
1790 * gone down and up again. In some cases, link will not go
1791 * down so we need to set up the new speed here.
1792 */
1793 if (bmsr & BMSR_LSTATUS) {
1794 bp->line_speed = bp->req_line_speed;
1795 bp->duplex = bp->req_duplex;
1796 bnx2_resolve_flow_ctrl(bp);
1797 bnx2_set_mac_link(bp);
1798 }
1799 } else {
1800 bnx2_resolve_flow_ctrl(bp);
1801 bnx2_set_mac_link(bp);
1802 }
1803 return 0;
1804 }
1805
1806 static int
1807 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1808 {
1809 if (bp->loopback == MAC_LOOPBACK)
1810 return 0;
1811
1812 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1813 return (bnx2_setup_serdes_phy(bp, port));
1814 }
1815 else {
1816 return (bnx2_setup_copper_phy(bp));
1817 }
1818 }
1819
1820 static int
1821 bnx2_init_5709s_phy(struct bnx2 *bp)
1822 {
1823 u32 val;
1824
1825 bp->mii_bmcr = MII_BMCR + 0x10;
1826 bp->mii_bmsr = MII_BMSR + 0x10;
1827 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1828 bp->mii_adv = MII_ADVERTISE + 0x10;
1829 bp->mii_lpa = MII_LPA + 0x10;
1830 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1831
1832 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1833 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1834
1835 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1836 bnx2_reset_phy(bp);
1837
1838 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1839
1840 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1841 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1842 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1843 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1844
1845 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1846 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1847 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
1848 val |= BCM5708S_UP1_2G5;
1849 else
1850 val &= ~BCM5708S_UP1_2G5;
1851 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1852
1853 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1854 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1855 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1856 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1857
1858 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1859
1860 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1861 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1862 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1863
1864 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1865
1866 return 0;
1867 }
1868
1869 static int
1870 bnx2_init_5708s_phy(struct bnx2 *bp)
1871 {
1872 u32 val;
1873
1874 bnx2_reset_phy(bp);
1875
1876 bp->mii_up1 = BCM5708S_UP1;
1877
1878 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1879 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1880 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1881
1882 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1883 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1884 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1885
1886 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1887 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1888 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1889
1890 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
1891 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1892 val |= BCM5708S_UP1_2G5;
1893 bnx2_write_phy(bp, BCM5708S_UP1, val);
1894 }
1895
1896 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1897 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1898 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1899 /* increase tx signal amplitude */
1900 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1901 BCM5708S_BLK_ADDR_TX_MISC);
1902 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1903 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1904 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1905 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1906 }
1907
1908 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1909 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1910
1911 if (val) {
1912 u32 is_backplane;
1913
1914 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1915 BNX2_SHARED_HW_CFG_CONFIG);
1916 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1917 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1918 BCM5708S_BLK_ADDR_TX_MISC);
1919 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1920 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1921 BCM5708S_BLK_ADDR_DIG);
1922 }
1923 }
1924 return 0;
1925 }
1926
1927 static int
1928 bnx2_init_5706s_phy(struct bnx2 *bp)
1929 {
1930 bnx2_reset_phy(bp);
1931
1932 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1933
1934 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1935 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1936
1937 if (bp->dev->mtu > 1500) {
1938 u32 val;
1939
1940 /* Set extended packet length bit */
1941 bnx2_write_phy(bp, 0x18, 0x7);
1942 bnx2_read_phy(bp, 0x18, &val);
1943 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1944
1945 bnx2_write_phy(bp, 0x1c, 0x6c00);
1946 bnx2_read_phy(bp, 0x1c, &val);
1947 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1948 }
1949 else {
1950 u32 val;
1951
1952 bnx2_write_phy(bp, 0x18, 0x7);
1953 bnx2_read_phy(bp, 0x18, &val);
1954 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1955
1956 bnx2_write_phy(bp, 0x1c, 0x6c00);
1957 bnx2_read_phy(bp, 0x1c, &val);
1958 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1959 }
1960
1961 return 0;
1962 }
1963
1964 static int
1965 bnx2_init_copper_phy(struct bnx2 *bp)
1966 {
1967 u32 val;
1968
1969 bnx2_reset_phy(bp);
1970
1971 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
1972 bnx2_write_phy(bp, 0x18, 0x0c00);
1973 bnx2_write_phy(bp, 0x17, 0x000a);
1974 bnx2_write_phy(bp, 0x15, 0x310b);
1975 bnx2_write_phy(bp, 0x17, 0x201f);
1976 bnx2_write_phy(bp, 0x15, 0x9506);
1977 bnx2_write_phy(bp, 0x17, 0x401f);
1978 bnx2_write_phy(bp, 0x15, 0x14e2);
1979 bnx2_write_phy(bp, 0x18, 0x0400);
1980 }
1981
1982 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
1983 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1984 MII_BNX2_DSP_EXPAND_REG | 0x8);
1985 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1986 val &= ~(1 << 8);
1987 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1988 }
1989
1990 if (bp->dev->mtu > 1500) {
1991 /* Set extended packet length bit */
1992 bnx2_write_phy(bp, 0x18, 0x7);
1993 bnx2_read_phy(bp, 0x18, &val);
1994 bnx2_write_phy(bp, 0x18, val | 0x4000);
1995
1996 bnx2_read_phy(bp, 0x10, &val);
1997 bnx2_write_phy(bp, 0x10, val | 0x1);
1998 }
1999 else {
2000 bnx2_write_phy(bp, 0x18, 0x7);
2001 bnx2_read_phy(bp, 0x18, &val);
2002 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2003
2004 bnx2_read_phy(bp, 0x10, &val);
2005 bnx2_write_phy(bp, 0x10, val & ~0x1);
2006 }
2007
2008 /* ethernet@wirespeed */
2009 bnx2_write_phy(bp, 0x18, 0x7007);
2010 bnx2_read_phy(bp, 0x18, &val);
2011 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2012 return 0;
2013 }
2014
2015
2016 static int
2017 bnx2_init_phy(struct bnx2 *bp)
2018 {
2019 u32 val;
2020 int rc = 0;
2021
2022 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2023 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2024
2025 bp->mii_bmcr = MII_BMCR;
2026 bp->mii_bmsr = MII_BMSR;
2027 bp->mii_bmsr1 = MII_BMSR;
2028 bp->mii_adv = MII_ADVERTISE;
2029 bp->mii_lpa = MII_LPA;
2030
2031 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2032
2033 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2034 goto setup_phy;
2035
2036 bnx2_read_phy(bp, MII_PHYSID1, &val);
2037 bp->phy_id = val << 16;
2038 bnx2_read_phy(bp, MII_PHYSID2, &val);
2039 bp->phy_id |= val & 0xffff;
2040
2041 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2042 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2043 rc = bnx2_init_5706s_phy(bp);
2044 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2045 rc = bnx2_init_5708s_phy(bp);
2046 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2047 rc = bnx2_init_5709s_phy(bp);
2048 }
2049 else {
2050 rc = bnx2_init_copper_phy(bp);
2051 }
2052
2053 setup_phy:
2054 if (!rc)
2055 rc = bnx2_setup_phy(bp, bp->phy_port);
2056
2057 return rc;
2058 }
2059
2060 static int
2061 bnx2_set_mac_loopback(struct bnx2 *bp)
2062 {
2063 u32 mac_mode;
2064
2065 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2066 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2067 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2068 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2069 bp->link_up = 1;
2070 return 0;
2071 }
2072
2073 static int bnx2_test_link(struct bnx2 *);
2074
2075 static int
2076 bnx2_set_phy_loopback(struct bnx2 *bp)
2077 {
2078 u32 mac_mode;
2079 int rc, i;
2080
2081 spin_lock_bh(&bp->phy_lock);
2082 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2083 BMCR_SPEED1000);
2084 spin_unlock_bh(&bp->phy_lock);
2085 if (rc)
2086 return rc;
2087
2088 for (i = 0; i < 10; i++) {
2089 if (bnx2_test_link(bp) == 0)
2090 break;
2091 msleep(100);
2092 }
2093
2094 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2095 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2096 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2097 BNX2_EMAC_MODE_25G_MODE);
2098
2099 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2100 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2101 bp->link_up = 1;
2102 return 0;
2103 }
2104
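/* Post a message in the driver mailbox and wait for the firmware to
 * acknowledge it.  A rolling sequence number is folded into the low
 * bits of the message so each exchange can be matched against its
 * ack; on timeout the firmware is told so with a FW_TIMEOUT code.
 */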
2105 static int
2106 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2107 {
2108 int i;
2109 u32 val;
2110
2111 bp->fw_wr_seq++;
2112 msg_data |= bp->fw_wr_seq;
2113
2114 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2115
2116 /* Wait for an acknowledgement. */
2117 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2118 msleep(10);
2119
2120 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2121
2122 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2123 break;
2124 }
2125 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2126 return 0;
2127
2128 /* If we timed out, inform the firmware that this is the case. */
2129 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2130 if (!silent)
2131 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2132 "%x\n", msg_data);
2133
2134 msg_data &= ~BNX2_DRV_MSG_CODE;
2135 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2136
2137 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2138
2139 return -EBUSY;
2140 }
2141
2142 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2143 return -EIO;
2144
2145 return 0;
2146 }
2147
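/* The 5709 keeps its context memory in host pages.  Enable the
 * context block, wait for its internal memory init to complete,
 * then program one host page-table entry per context page, polling
 * for each write request to finish.
 */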
2148 static int
2149 bnx2_init_5709_context(struct bnx2 *bp)
2150 {
2151 int i, ret = 0;
2152 u32 val;
2153
2154 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2155 val |= (BCM_PAGE_BITS - 8) << 16;
2156 REG_WR(bp, BNX2_CTX_COMMAND, val);
2157 for (i = 0; i < 10; i++) {
2158 val = REG_RD(bp, BNX2_CTX_COMMAND);
2159 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2160 break;
2161 udelay(2);
2162 }
2163 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2164 return -EBUSY;
2165
2166 for (i = 0; i < bp->ctx_pages; i++) {
2167 int j;
2168
2169 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2170 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2171 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2172 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2173 (u64) bp->ctx_blk_mapping[i] >> 32);
2174 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2175 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2176 for (j = 0; j < 10; j++) {
2177
2178 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2179 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2180 break;
2181 udelay(5);
2182 }
2183 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2184 ret = -EBUSY;
2185 break;
2186 }
2187 }
2188 return ret;
2189 }
2190
2191 static void
2192 bnx2_init_context(struct bnx2 *bp)
2193 {
2194 u32 vcid;
2195
2196 vcid = 96;
2197 while (vcid) {
2198 u32 vcid_addr, pcid_addr, offset;
2199 int i;
2200
2201 vcid--;
2202
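/* On the 5706 A0, virtual CIDs with bit 3 set map to different
 * physical CIDs (remapped below); later chips map one-to-one.
 */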
2203 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2204 u32 new_vcid;
2205
2206 vcid_addr = GET_PCID_ADDR(vcid);
2207 if (vcid & 0x8) {
2208 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2209 }
2210 else {
2211 new_vcid = vcid;
2212 }
2213 pcid_addr = GET_PCID_ADDR(new_vcid);
2214 }
2215 else {
2216 vcid_addr = GET_CID_ADDR(vcid);
2217 pcid_addr = vcid_addr;
2218 }
2219
2220 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2221 vcid_addr += (i << PHY_CTX_SHIFT);
2222 pcid_addr += (i << PHY_CTX_SHIFT);
2223
2224 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2225 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2226
2227 /* Zero out the context. */
2228 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2229 CTX_WR(bp, vcid_addr, offset, 0);
2230 }
2231 }
2232 }
2233
2234 static int
2235 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2236 {
2237 u16 *good_mbuf;
2238 u32 good_mbuf_cnt;
2239 u32 val;
2240
2241 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2242 if (good_mbuf == NULL) {
2243 printk(KERN_ERR PFX "Failed to allocate memory in "
2244 "bnx2_alloc_bad_rbuf\n");
2245 return -ENOMEM;
2246 }
2247
2248 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2249 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2250
2251 good_mbuf_cnt = 0;
2252
2253 /* Allocate a bunch of mbufs and save the good ones in an array. */
2254 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2255 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2256 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2257
2258 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2259
2260 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2261
2262 /* The addresses with Bit 9 set are bad memory blocks. */
2263 if (!(val & (1 << 9))) {
2264 good_mbuf[good_mbuf_cnt] = (u16) val;
2265 good_mbuf_cnt++;
2266 }
2267
2268 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2269 }
2270
2271 /* Free the good ones back to the mbuf pool, thus discarding
2272 * all the bad ones. */
2273 while (good_mbuf_cnt) {
2274 good_mbuf_cnt--;
2275
2276 val = good_mbuf[good_mbuf_cnt];
2277 val = (val << 9) | val | 1;
2278
2279 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2280 }
2281 kfree(good_mbuf);
2282 return 0;
2283 }
2284
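/* Program the unicast address into the EMAC perfect-match
 * registers: the two high-order bytes go into MAC_MATCH0 and
 * the remaining four bytes into MAC_MATCH1.
 */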
2285 static void
2286 bnx2_set_mac_addr(struct bnx2 *bp)
2287 {
2288 u32 val;
2289 u8 *mac_addr = bp->dev->dev_addr;
2290
2291 val = (mac_addr[0] << 8) | mac_addr[1];
2292
2293 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2294
2295 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2296 (mac_addr[4] << 8) | mac_addr[5];
2297
2298 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2299 }
2300
2301 static inline int
2302 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2303 {
2304 dma_addr_t mapping;
2305 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2306 struct rx_bd *rxbd =
2307 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2308 struct page *page = alloc_page(GFP_ATOMIC);
2309
2310 if (!page)
2311 return -ENOMEM;
2312 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2313 PCI_DMA_FROMDEVICE);
2314 rx_pg->page = page;
2315 pci_unmap_addr_set(rx_pg, mapping, mapping);
2316 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2317 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2318 return 0;
2319 }
2320
2321 static void
2322 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2323 {
2324 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2325 struct page *page = rx_pg->page;
2326
2327 if (!page)
2328 return;
2329
2330 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2331 PCI_DMA_FROMDEVICE);
2332
2333 __free_page(page);
2334 rx_pg->page = NULL;
2335 }
2336
2337 static inline int
2338 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2339 {
2340 struct sk_buff *skb;
2341 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2342 dma_addr_t mapping;
2343 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2344 unsigned long align;
2345
2346 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2347 if (skb == NULL) {
2348 return -ENOMEM;
2349 }
2350
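/* Round skb->data up to a BNX2_RX_ALIGN boundary before the
 * buffer is mapped for DMA.
 */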
2351 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2352 skb_reserve(skb, BNX2_RX_ALIGN - align);
2353
2354 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2355 PCI_DMA_FROMDEVICE);
2356
2357 rx_buf->skb = skb;
2358 pci_unmap_addr_set(rx_buf, mapping, mapping);
2359
2360 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2361 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2362
2363 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2364
2365 return 0;
2366 }
2367
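/* An attention event is pending when its bit differs between
 * status_attn_bits and status_attn_bits_ack.  Acknowledge it by
 * setting or clearing the ack bit to match, via the status bit
 * set/clear command registers.
 */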
2368 static int
2369 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2370 {
2371 struct status_block *sblk = bnapi->status_blk;
2372 u32 new_link_state, old_link_state;
2373 int is_set = 1;
2374
2375 new_link_state = sblk->status_attn_bits & event;
2376 old_link_state = sblk->status_attn_bits_ack & event;
2377 if (new_link_state != old_link_state) {
2378 if (new_link_state)
2379 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2380 else
2381 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2382 } else
2383 is_set = 0;
2384
2385 return is_set;
2386 }
2387
2388 static void
2389 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2390 {
2391 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2392 spin_lock(&bp->phy_lock);
2393 bnx2_set_link(bp);
2394 spin_unlock(&bp->phy_lock);
2395 }
2396 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2397 bnx2_set_remote_link(bp);
2398
2399 }
2400
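/* The last BD of every ring page is a pointer to the next page,
 * so a hardware consumer index that lands on that slot is bumped
 * past it.
 */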
2401 static inline u16
2402 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2403 {
2404 u16 cons;
2405
2406 if (bnapi->int_num == 0)
2407 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2408 else
2409 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2410
2411 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2412 cons++;
2413 return cons;
2414 }
2415
2416 static int
2417 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2418 {
2419 u16 hw_cons, sw_cons, sw_ring_cons;
2420 int tx_pkt = 0;
2421
2422 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2423 sw_cons = bnapi->tx_cons;
2424
2425 while (sw_cons != hw_cons) {
2426 struct sw_bd *tx_buf;
2427 struct sk_buff *skb;
2428 int i, last;
2429
2430 sw_ring_cons = TX_RING_IDX(sw_cons);
2431
2432 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2433 skb = tx_buf->skb;
2434
2435 /* Partial BD completions are possible with TSO packets. */
2436 if (skb_is_gso(skb)) {
2437 u16 last_idx, last_ring_idx;
2438
2439 last_idx = sw_cons +
2440 skb_shinfo(skb)->nr_frags + 1;
2441 last_ring_idx = sw_ring_cons +
2442 skb_shinfo(skb)->nr_frags + 1;
2443 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2444 last_idx++;
2445 }
2446 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2447 break;
2448 }
2449 }
2450
2451 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2452 skb_headlen(skb), PCI_DMA_TODEVICE);
2453
2454 tx_buf->skb = NULL;
2455 last = skb_shinfo(skb)->nr_frags;
2456
2457 for (i = 0; i < last; i++) {
2458 sw_cons = NEXT_TX_BD(sw_cons);
2459
2460 pci_unmap_page(bp->pdev,
2461 pci_unmap_addr(
2462 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2463 mapping),
2464 skb_shinfo(skb)->frags[i].size,
2465 PCI_DMA_TODEVICE);
2466 }
2467
2468 sw_cons = NEXT_TX_BD(sw_cons);
2469
2470 dev_kfree_skb(skb);
2471 tx_pkt++;
2472 if (tx_pkt == budget)
2473 break;
2474
2475 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2476 }
2477
2478 bnapi->hw_tx_cons = hw_cons;
2479 bnapi->tx_cons = sw_cons;
2480 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2481 * before checking for netif_queue_stopped(). Without the
2482 * memory barrier, there is a small possibility that bnx2_start_xmit()
2483 * will miss it and cause the queue to be stopped forever.
2484 */
2485 smp_mb();
2486
2487 if (unlikely(netif_queue_stopped(bp->dev)) &&
2488 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2489 netif_tx_lock(bp->dev);
2490 if ((netif_queue_stopped(bp->dev)) &&
2491 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2492 netif_wake_queue(bp->dev);
2493 netif_tx_unlock(bp->dev);
2494 }
2495 return tx_pkt;
2496 }
2497
2498 static void
2499 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2500 struct sk_buff *skb, int count)
2501 {
2502 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2503 struct rx_bd *cons_bd, *prod_bd;
2504 dma_addr_t mapping;
2505 int i;
2506 u16 hw_prod = bnapi->rx_pg_prod, prod;
2507 u16 cons = bnapi->rx_pg_cons;
2508
2509 for (i = 0; i < count; i++) {
2510 prod = RX_PG_RING_IDX(hw_prod);
2511
2512 prod_rx_pg = &bp->rx_pg_ring[prod];
2513 cons_rx_pg = &bp->rx_pg_ring[cons];
2514 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2515 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2516
2517 if (i == 0 && skb) {
2518 struct page *page;
2519 struct skb_shared_info *shinfo;
2520
2521 shinfo = skb_shinfo(skb);
2522 shinfo->nr_frags--;
2523 page = shinfo->frags[shinfo->nr_frags].page;
2524 shinfo->frags[shinfo->nr_frags].page = NULL;
2525 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2526 PCI_DMA_FROMDEVICE);
2527 cons_rx_pg->page = page;
2528 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2529 dev_kfree_skb(skb);
2530 }
2531 if (prod != cons) {
2532 prod_rx_pg->page = cons_rx_pg->page;
2533 cons_rx_pg->page = NULL;
2534 pci_unmap_addr_set(prod_rx_pg, mapping,
2535 pci_unmap_addr(cons_rx_pg, mapping));
2536
2537 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2538 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2539
2540 }
2541 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2542 hw_prod = NEXT_RX_BD(hw_prod);
2543 }
2544 bnapi->rx_pg_prod = hw_prod;
2545 bnapi->rx_pg_cons = cons;
2546 }
2547
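/* Recycle an rx buffer: resync it for the device and move the skb
 * and its DMA mapping from the consumer slot to the current
 * producer slot, so the ring is never left with a hole (e.g. when
 * a replacement skb cannot be allocated).
 */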
2548 static inline void
2549 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2550 u16 cons, u16 prod)
2551 {
2552 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2553 struct rx_bd *cons_bd, *prod_bd;
2554
2555 cons_rx_buf = &bp->rx_buf_ring[cons];
2556 prod_rx_buf = &bp->rx_buf_ring[prod];
2557
2558 pci_dma_sync_single_for_device(bp->pdev,
2559 pci_unmap_addr(cons_rx_buf, mapping),
2560 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2561
2562 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2563
2564 prod_rx_buf->skb = skb;
2565
2566 if (cons == prod)
2567 return;
2568
2569 pci_unmap_addr_set(prod_rx_buf, mapping,
2570 pci_unmap_addr(cons_rx_buf, mapping));
2571
2572 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2573 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2574 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2575 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2576 }
2577
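/* Finish receiving a packet that may spill into the page ring: the
 * first hdr_len bytes stay in the linear skb and the rest is
 * attached as page fragments, replenishing the page ring along the
 * way.  On allocation failure the buffers are recycled and the
 * packet is dropped.
 */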
2578 static int
2579 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2580 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2581 u32 ring_idx)
2582 {
2583 int err;
2584 u16 prod = ring_idx & 0xffff;
2585
2586 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2587 if (unlikely(err)) {
2588 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2589 if (hdr_len) {
2590 unsigned int raw_len = len + 4;
2591 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2592
2593 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2594 }
2595 return err;
2596 }
2597
2598 skb_reserve(skb, bp->rx_offset);
2599 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2600 PCI_DMA_FROMDEVICE);
2601
2602 if (hdr_len == 0) {
2603 skb_put(skb, len);
2604 return 0;
2605 } else {
2606 unsigned int i, frag_len, frag_size, pages;
2607 struct sw_pg *rx_pg;
2608 u16 pg_cons = bnapi->rx_pg_cons;
2609 u16 pg_prod = bnapi->rx_pg_prod;
2610
2611 frag_size = len + 4 - hdr_len;
2612 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2613 skb_put(skb, hdr_len);
2614
2615 for (i = 0; i < pages; i++) {
2616 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2617 if (unlikely(frag_len <= 4)) {
2618 unsigned int tail = 4 - frag_len;
2619
2620 bnapi->rx_pg_cons = pg_cons;
2621 bnapi->rx_pg_prod = pg_prod;
2622 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2623 pages - i);
2624 skb->len -= tail;
2625 if (i == 0) {
2626 skb->tail -= tail;
2627 } else {
2628 skb_frag_t *frag =
2629 &skb_shinfo(skb)->frags[i - 1];
2630 frag->size -= tail;
2631 skb->data_len -= tail;
2632 skb->truesize -= tail;
2633 }
2634 return 0;
2635 }
2636 rx_pg = &bp->rx_pg_ring[pg_cons];
2637
2638 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2639 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2640
2641 if (i == pages - 1)
2642 frag_len -= 4;
2643
2644 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2645 rx_pg->page = NULL;
2646
2647 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2648 if (unlikely(err)) {
2649 bnapi->rx_pg_cons = pg_cons;
2650 bnapi->rx_pg_prod = pg_prod;
2651 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2652 pages - i);
2653 return err;
2654 }
2655
2656 frag_size -= frag_len;
2657 skb->data_len += frag_len;
2658 skb->truesize += frag_len;
2659 skb->len += frag_len;
2660
2661 pg_prod = NEXT_RX_BD(pg_prod);
2662 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2663 }
2664 bnapi->rx_pg_prod = pg_prod;
2665 bnapi->rx_pg_cons = pg_cons;
2666 }
2667 return 0;
2668 }
2669
2670 static inline u16
2671 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2672 {
2673 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2674
2675 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2676 cons++;
2677 return cons;
2678 }
2679
2680 static int
2681 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2682 {
2683 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2684 struct l2_fhdr *rx_hdr;
2685 int rx_pkt = 0, pg_ring_used = 0;
2686
2687 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2688 sw_cons = bnapi->rx_cons;
2689 sw_prod = bnapi->rx_prod;
2690
2691 /* A memory barrier is necessary, as speculative reads of the rx
2692 * buffer can run ahead of the index in the status block.
2693 */
2694 rmb();
2695 while (sw_cons != hw_cons) {
2696 unsigned int len, hdr_len;
2697 u32 status;
2698 struct sw_bd *rx_buf;
2699 struct sk_buff *skb;
2700 dma_addr_t dma_addr;
2701
2702 sw_ring_cons = RX_RING_IDX(sw_cons);
2703 sw_ring_prod = RX_RING_IDX(sw_prod);
2704
2705 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2706 skb = rx_buf->skb;
2707
2708 rx_buf->skb = NULL;
2709
2710 dma_addr = pci_unmap_addr(rx_buf, mapping);
2711
2712 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2713 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2714
2715 rx_hdr = (struct l2_fhdr *) skb->data;
2716 len = rx_hdr->l2_fhdr_pkt_len;
2717
2718 if ((status = rx_hdr->l2_fhdr_status) &
2719 (L2_FHDR_ERRORS_BAD_CRC |
2720 L2_FHDR_ERRORS_PHY_DECODE |
2721 L2_FHDR_ERRORS_ALIGNMENT |
2722 L2_FHDR_ERRORS_TOO_SHORT |
2723 L2_FHDR_ERRORS_GIANT_FRAME)) {
2724
2725 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2726 sw_ring_prod);
2727 goto next_rx;
2728 }
2729 hdr_len = 0;
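/* For split packets the header length is reported in the
 * l2_fhdr_ip_xsum field; otherwise any frame longer than the
 * jumbo threshold spills into the page ring.
 */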
2730 if (status & L2_FHDR_STATUS_SPLIT) {
2731 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2732 pg_ring_used = 1;
2733 } else if (len > bp->rx_jumbo_thresh) {
2734 hdr_len = bp->rx_jumbo_thresh;
2735 pg_ring_used = 1;
2736 }
2737
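/* The hardware-reported length includes the 4-byte frame CRC. */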
2738 len -= 4;
2739
2740 if (len <= bp->rx_copy_thresh) {
2741 struct sk_buff *new_skb;
2742
2743 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2744 if (new_skb == NULL) {
2745 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2746 sw_ring_prod);
2747 goto next_rx;
2748 }
2749
2750 /* aligned copy */
2751 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2752 new_skb->data, len + 2);
2753 skb_reserve(new_skb, 2);
2754 skb_put(new_skb, len);
2755
2756 bnx2_reuse_rx_skb(bp, bnapi, skb,
2757 sw_ring_cons, sw_ring_prod);
2758
2759 skb = new_skb;
2760 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2761 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2762 goto next_rx;
2763
2764 skb->protocol = eth_type_trans(skb, bp->dev);
2765
2766 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2767 (ntohs(skb->protocol) != 0x8100)) {
2768
2769 dev_kfree_skb(skb);
2770 goto next_rx;
2771
2772 }
2773
2774 skb->ip_summed = CHECKSUM_NONE;
2775 if (bp->rx_csum &&
2776 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2777 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2778
2779 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2780 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2781 skb->ip_summed = CHECKSUM_UNNECESSARY;
2782 }
2783
2784 #ifdef BCM_VLAN
2785 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2786 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2787 rx_hdr->l2_fhdr_vlan_tag);
2788 }
2789 else
2790 #endif
2791 netif_receive_skb(skb);
2792
2793 bp->dev->last_rx = jiffies;
2794 rx_pkt++;
2795
2796 next_rx:
2797 sw_cons = NEXT_RX_BD(sw_cons);
2798 sw_prod = NEXT_RX_BD(sw_prod);
2799
2800 if (rx_pkt == budget)
2801 break;
2802
2803 /* Refresh hw_cons to see if there is new work */
2804 if (sw_cons == hw_cons) {
2805 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2806 rmb();
2807 }
2808 }
2809 bnapi->rx_cons = sw_cons;
2810 bnapi->rx_prod = sw_prod;
2811
2812 if (pg_ring_used)
2813 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2814 bnapi->rx_pg_prod);
2815
2816 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2817
2818 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2819
2820 mmiowb();
2821
2822 return rx_pkt;
2823
2824 }
2825
2826 /* MSI ISR - The only difference between this and the INTx ISR
2827 * is that the MSI interrupt is always serviced.
2828 */
2829 static irqreturn_t
2830 bnx2_msi(int irq, void *dev_instance)
2831 {
2832 struct net_device *dev = dev_instance;
2833 struct bnx2 *bp = netdev_priv(dev);
2834 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2835
2836 prefetch(bnapi->status_blk);
2837 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2838 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2839 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2840
2841 /* Return here if interrupt is disabled. */
2842 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2843 return IRQ_HANDLED;
2844
2845 netif_rx_schedule(dev, &bnapi->napi);
2846
2847 return IRQ_HANDLED;
2848 }
2849
2850 static irqreturn_t
2851 bnx2_msi_1shot(int irq, void *dev_instance)
2852 {
2853 struct net_device *dev = dev_instance;
2854 struct bnx2 *bp = netdev_priv(dev);
2855 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2856
2857 prefetch(bnapi->status_blk);
2858
2859 /* Return here if interrupt is disabled. */
2860 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2861 return IRQ_HANDLED;
2862
2863 netif_rx_schedule(dev, &bnapi->napi);
2864
2865 return IRQ_HANDLED;
2866 }
2867
2868 static irqreturn_t
2869 bnx2_interrupt(int irq, void *dev_instance)
2870 {
2871 struct net_device *dev = dev_instance;
2872 struct bnx2 *bp = netdev_priv(dev);
2873 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2874 struct status_block *sblk = bnapi->status_blk;
2875
2876 /* When using INTx, it is possible for the interrupt to arrive
2877 * at the CPU before the status block that was posted prior to
2878 * the interrupt. Reading a register will flush the status block.
2879 * When using MSI, the MSI message will always complete after
2880 * the status block write.
2881 */
2882 if ((sblk->status_idx == bnapi->last_status_idx) &&
2883 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2884 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2885 return IRQ_NONE;
2886
2887 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2888 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2889 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2890
2891 /* Read back to deassert IRQ immediately to avoid too many
2892 * spurious interrupts.
2893 */
2894 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2895
2896 /* Return here if the interrupt is shared and disabled. */
2897 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2898 return IRQ_HANDLED;
2899
2900 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2901 bnapi->last_status_idx = sblk->status_idx;
2902 __netif_rx_schedule(dev, &bnapi->napi);
2903 }
2904
2905 return IRQ_HANDLED;
2906 }
2907
2908 static irqreturn_t
2909 bnx2_tx_msix(int irq, void *dev_instance)
2910 {
2911 struct net_device *dev = dev_instance;
2912 struct bnx2 *bp = netdev_priv(dev);
2913 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2914
2915 prefetch(bnapi->status_blk_msix);
2916
2917 /* Return here if interrupt is disabled. */
2918 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2919 return IRQ_HANDLED;
2920
2921 netif_rx_schedule(dev, &bnapi->napi);
2922 return IRQ_HANDLED;
2923 }
2924
2925 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2926 STATUS_ATTN_BITS_TIMER_ABORT)
2927
2928 static inline int
2929 bnx2_has_work(struct bnx2_napi *bnapi)
2930 {
2931 struct status_block *sblk = bnapi->status_blk;
2932
2933 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2934 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2935 return 1;
2936
2937 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2938 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2939 return 1;
2940
2941 return 0;
2942 }
2943
2944 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
2945 {
2946 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2947 struct bnx2 *bp = bnapi->bp;
2948 int work_done = 0;
2949 struct status_block_msix *sblk = bnapi->status_blk_msix;
2950
2951 do {
2952 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
2953 if (unlikely(work_done >= budget))
2954 return work_done;
2955
2956 bnapi->last_status_idx = sblk->status_idx;
2957 rmb();
2958 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
2959
2960 netif_rx_complete(bp->dev, napi);
2961 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
2962 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2963 bnapi->last_status_idx);
2964 return work_done;
2965 }
2966
2967 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2968 int work_done, int budget)
2969 {
2970 struct status_block *sblk = bnapi->status_blk;
2971 u32 status_attn_bits = sblk->status_attn_bits;
2972 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2973
2974 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2975 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2976
2977 bnx2_phy_int(bp, bnapi);
2978
2979 /* This is needed to take care of transient status
2980 * during link changes.
2981 */
2982 REG_WR(bp, BNX2_HC_COMMAND,
2983 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2984 REG_RD(bp, BNX2_HC_COMMAND);
2985 }
2986
2987 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
2988 bnx2_tx_int(bp, bnapi, 0);
2989
2990 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
2991 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
2992
2993 return work_done;
2994 }
2995
2996 static int bnx2_poll(struct napi_struct *napi, int budget)
2997 {
2998 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2999 struct bnx2 *bp = bnapi->bp;
3000 int work_done = 0;
3001 struct status_block *sblk = bnapi->status_blk;
3002
3003 while (1) {
3004 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3005
3006 if (unlikely(work_done >= budget))
3007 break;
3008
3009 /* bnapi->last_status_idx is used below to tell the hw how
3010 * much work has been processed, so we must read it before
3011 * checking for more work.
3012 */
3013 bnapi->last_status_idx = sblk->status_idx;
3014 rmb();
3015 if (likely(!bnx2_has_work(bnapi))) {
3016 netif_rx_complete(bp->dev, napi);
3017 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3018 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3019 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3020 bnapi->last_status_idx);
3021 break;
3022 }
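/* For INTx, ack with the interrupt still masked, then write
 * again without MASK_INT to unmask.
 */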
3023 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3024 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3025 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3026 bnapi->last_status_idx);
3027
3028 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3029 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3030 bnapi->last_status_idx);
3031 break;
3032 }
3033 }
3034
3035 return work_done;
3036 }
3037
3038 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3039 * from set_multicast.
3040 */
3041 static void
3042 bnx2_set_rx_mode(struct net_device *dev)
3043 {
3044 struct bnx2 *bp = netdev_priv(dev);
3045 u32 rx_mode, sort_mode;
3046 int i;
3047
3048 spin_lock_bh(&bp->phy_lock);
3049
3050 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3051 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3052 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3053 #ifdef BCM_VLAN
3054 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3055 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3056 #else
3057 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3058 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3059 #endif
3060 if (dev->flags & IFF_PROMISC) {
3061 /* Promiscuous mode. */
3062 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3063 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3064 BNX2_RPM_SORT_USER0_PROM_VLAN;
3065 }
3066 else if (dev->flags & IFF_ALLMULTI) {
3067 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3068 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3069 0xffffffff);
3070 }
3071 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3072 }
3073 else {
3074 /* Accept one or more multicast(s). */
3075 struct dev_mc_list *mclist;
3076 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3077 u32 regidx;
3078 u32 bit;
3079 u32 crc;
3080
3081 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3082
3083 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3084 i++, mclist = mclist->next) {
3085
3086 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3087 bit = crc & 0xff;
3088 regidx = (bit & 0xe0) >> 5;
3089 bit &= 0x1f;
3090 mc_filter[regidx] |= (1 << bit);
3091 }
3092
3093 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3094 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3095 mc_filter[i]);
3096 }
3097
3098 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3099 }
3100
3101 if (rx_mode != bp->rx_mode) {
3102 bp->rx_mode = rx_mode;
3103 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3104 }
3105
3106 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3107 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3108 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3109
3110 spin_unlock_bh(&bp->phy_lock);
3111 }
3112
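/* Load RV2P processor firmware.  Each 64-bit instruction is
 * written as a high/low pair of 32-bit words, then committed to
 * the selected processor's instruction memory with an
 * address/command write.
 */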
3113 static void
3114 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3115 u32 rv2p_proc)
3116 {
3117 int i;
3118 u32 val;
3119
3120
3121 for (i = 0; i < rv2p_code_len; i += 8) {
3122 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3123 rv2p_code++;
3124 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3125 rv2p_code++;
3126
3127 if (rv2p_proc == RV2P_PROC1) {
3128 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3129 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3130 }
3131 else {
3132 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3133 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3134 }
3135 }
3136
3137 /* Reset the processor; the un-stall is done later. */
3138 if (rv2p_proc == RV2P_PROC1) {
3139 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3140 }
3141 else {
3142 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3143 }
3144 }
3145
3146 static int
3147 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3148 {
3149 u32 offset;
3150 u32 val;
3151 int rc;
3152
3153 /* Halt the CPU. */
3154 val = REG_RD_IND(bp, cpu_reg->mode);
3155 val |= cpu_reg->mode_value_halt;
3156 REG_WR_IND(bp, cpu_reg->mode, val);
3157 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3158
3159 /* Load the Text area. */
3160 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3161 if (fw->gz_text) {
3162 int j;
3163
3164 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3165 fw->gz_text_len);
3166 if (rc < 0)
3167 return rc;
3168
3169 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3170 REG_WR_IND(bp, offset, le32_to_cpu(fw->text[j]));
3171 }
3172 }
3173
3174 /* Load the Data area. */
3175 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3176 if (fw->data) {
3177 int j;
3178
3179 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3180 REG_WR_IND(bp, offset, fw->data[j]);
3181 }
3182 }
3183
3184 /* Load the SBSS area. */
3185 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3186 if (fw->sbss_len) {
3187 int j;
3188
3189 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3190 REG_WR_IND(bp, offset, 0);
3191 }
3192 }
3193
3194 /* Load the BSS area. */
3195 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3196 if (fw->bss_len) {
3197 int j;
3198
3199 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3200 REG_WR_IND(bp, offset, 0);
3201 }
3202 }
3203
3204 /* Load the Read-Only area. */
3205 offset = cpu_reg->spad_base +
3206 (fw->rodata_addr - cpu_reg->mips_view_base);
3207 if (fw->rodata) {
3208 int j;
3209
3210 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3211 REG_WR_IND(bp, offset, fw->rodata[j]);
3212 }
3213 }
3214
3215 /* Clear the pre-fetch instruction. */
3216 REG_WR_IND(bp, cpu_reg->inst, 0);
3217 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3218
3219 /* Start the CPU. */
3220 val = REG_RD_IND(bp, cpu_reg->mode);
3221 val &= ~cpu_reg->mode_value_halt;
3222 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3223 REG_WR_IND(bp, cpu_reg->mode, val);
3224
3225 return 0;
3226 }
3227
3228 static int
3229 bnx2_init_cpus(struct bnx2 *bp)
3230 {
3231 struct cpu_reg cpu_reg;
3232 struct fw_info *fw;
3233 int rc, rv2p_len;
3234 void *text, *rv2p;
3235
3236 /* Initialize the RV2P processor. */
3237 text = vmalloc(FW_BUF_SIZE);
3238 if (!text)
3239 return -ENOMEM;
3240 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3241 rv2p = bnx2_xi_rv2p_proc1;
3242 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3243 } else {
3244 rv2p = bnx2_rv2p_proc1;
3245 rv2p_len = sizeof(bnx2_rv2p_proc1);
3246 }
3247 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3248 if (rc < 0)
3249 goto init_cpu_err;
3250
3251 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3252
3253 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3254 rv2p = bnx2_xi_rv2p_proc2;
3255 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3256 } else {
3257 rv2p = bnx2_rv2p_proc2;
3258 rv2p_len = sizeof(bnx2_rv2p_proc2);
3259 }
3260 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3261 if (rc < 0)
3262 goto init_cpu_err;
3263
3264 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3265
3266 /* Initialize the RX Processor. */
3267 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3268 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3269 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3270 cpu_reg.state = BNX2_RXP_CPU_STATE;
3271 cpu_reg.state_value_clear = 0xffffff;
3272 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3273 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3274 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3275 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3276 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3277 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3278 cpu_reg.mips_view_base = 0x8000000;
3279
3280 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3281 fw = &bnx2_rxp_fw_09;
3282 else
3283 fw = &bnx2_rxp_fw_06;
3284
3285 fw->text = text;
3286 rc = load_cpu_fw(bp, &cpu_reg, fw);
3287 if (rc)
3288 goto init_cpu_err;
3289
3290 /* Initialize the TX Processor. */
3291 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3292 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3293 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3294 cpu_reg.state = BNX2_TXP_CPU_STATE;
3295 cpu_reg.state_value_clear = 0xffffff;
3296 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3297 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3298 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3299 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3300 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3301 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3302 cpu_reg.mips_view_base = 0x8000000;
3303
3304 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3305 fw = &bnx2_txp_fw_09;
3306 else
3307 fw = &bnx2_txp_fw_06;
3308
3309 fw->text = text;
3310 rc = load_cpu_fw(bp, &cpu_reg, fw);
3311 if (rc)
3312 goto init_cpu_err;
3313
3314 /* Initialize the TX Patch-up Processor. */
3315 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3316 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3317 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3318 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3319 cpu_reg.state_value_clear = 0xffffff;
3320 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3321 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3322 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3323 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3324 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3325 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3326 cpu_reg.mips_view_base = 0x8000000;
3327
3328 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3329 fw = &bnx2_tpat_fw_09;
3330 else
3331 fw = &bnx2_tpat_fw_06;
3332
3333 fw->text = text;
3334 rc = load_cpu_fw(bp, &cpu_reg, fw);
3335 if (rc)
3336 goto init_cpu_err;
3337
3338 /* Initialize the Completion Processor. */
3339 cpu_reg.mode = BNX2_COM_CPU_MODE;
3340 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3341 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3342 cpu_reg.state = BNX2_COM_CPU_STATE;
3343 cpu_reg.state_value_clear = 0xffffff;
3344 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3345 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3346 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3347 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3348 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3349 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3350 cpu_reg.mips_view_base = 0x8000000;
3351
3352 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3353 fw = &bnx2_com_fw_09;
3354 else
3355 fw = &bnx2_com_fw_06;
3356
3357 fw->text = text;
3358 rc = load_cpu_fw(bp, &cpu_reg, fw);
3359 if (rc)
3360 goto init_cpu_err;
3361
3362 /* Initialize the Command Processor. */
3363 cpu_reg.mode = BNX2_CP_CPU_MODE;
3364 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3365 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3366 cpu_reg.state = BNX2_CP_CPU_STATE;
3367 cpu_reg.state_value_clear = 0xffffff;
3368 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3369 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3370 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3371 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3372 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3373 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3374 cpu_reg.mips_view_base = 0x8000000;
3375
3376 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3377 fw = &bnx2_cp_fw_09;
3378 else
3379 fw = &bnx2_cp_fw_06;
3380
3381 fw->text = text;
3382 rc = load_cpu_fw(bp, &cpu_reg, fw);
3383
3384 init_cpu_err:
3385 vfree(text);
3386 return rc;
3387 }
3388
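/* Transition the device between D0 and D3hot.  Entering D3hot with
 * WOL enabled reprograms the MAC for magic/ACPI packet reception
 * and notifies the firmware before the PCI power state is changed.
 */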
3389 static int
3390 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3391 {
3392 u16 pmcsr;
3393
3394 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3395
3396 switch (state) {
3397 case PCI_D0: {
3398 u32 val;
3399
3400 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3401 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3402 PCI_PM_CTRL_PME_STATUS);
3403
3404 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3405 /* delay required during transition out of D3hot */
3406 msleep(20);
3407
3408 val = REG_RD(bp, BNX2_EMAC_MODE);
3409 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3410 val &= ~BNX2_EMAC_MODE_MPKT;
3411 REG_WR(bp, BNX2_EMAC_MODE, val);
3412
3413 val = REG_RD(bp, BNX2_RPM_CONFIG);
3414 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3415 REG_WR(bp, BNX2_RPM_CONFIG, val);
3416 break;
3417 }
3418 case PCI_D3hot: {
3419 int i;
3420 u32 val, wol_msg;
3421
3422 if (bp->wol) {
3423 u32 advertising;
3424 u8 autoneg;
3425
3426 autoneg = bp->autoneg;
3427 advertising = bp->advertising;
3428
3429 if (bp->phy_port == PORT_TP) {
3430 bp->autoneg = AUTONEG_SPEED;
3431 bp->advertising = ADVERTISED_10baseT_Half |
3432 ADVERTISED_10baseT_Full |
3433 ADVERTISED_100baseT_Half |
3434 ADVERTISED_100baseT_Full |
3435 ADVERTISED_Autoneg;
3436 }
3437
3438 spin_lock_bh(&bp->phy_lock);
3439 bnx2_setup_phy(bp, bp->phy_port);
3440 spin_unlock_bh(&bp->phy_lock);
3441
3442 bp->autoneg = autoneg;
3443 bp->advertising = advertising;
3444
3445 bnx2_set_mac_addr(bp);
3446
3447 val = REG_RD(bp, BNX2_EMAC_MODE);
3448
3449 /* Enable port mode. */
3450 val &= ~BNX2_EMAC_MODE_PORT;
3451 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3452 BNX2_EMAC_MODE_ACPI_RCVD |
3453 BNX2_EMAC_MODE_MPKT;
3454 if (bp->phy_port == PORT_TP)
3455 val |= BNX2_EMAC_MODE_PORT_MII;
3456 else {
3457 val |= BNX2_EMAC_MODE_PORT_GMII;
3458 if (bp->line_speed == SPEED_2500)
3459 val |= BNX2_EMAC_MODE_25G_MODE;
3460 }
3461
3462 REG_WR(bp, BNX2_EMAC_MODE, val);
3463
3464 /* Receive all multicast frames. */
3465 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3466 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3467 0xffffffff);
3468 }
3469 REG_WR(bp, BNX2_EMAC_RX_MODE,
3470 BNX2_EMAC_RX_MODE_SORT_MODE);
3471
3472 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3473 BNX2_RPM_SORT_USER0_MC_EN;
3474 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3475 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3476 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3477 BNX2_RPM_SORT_USER0_ENA);
3478
3479 /* Need to enable EMAC and RPM for WOL. */
3480 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3481 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3482 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3483 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3484
3485 val = REG_RD(bp, BNX2_RPM_CONFIG);
3486 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3487 REG_WR(bp, BNX2_RPM_CONFIG, val);
3488
3489 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3490 }
3491 else {
3492 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3493 }
3494
3495 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3496 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3497
3498 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3499 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3500 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3501
3502 if (bp->wol)
3503 pmcsr |= 3;
3504 }
3505 else {
3506 pmcsr |= 3;
3507 }
3508 if (bp->wol) {
3509 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3510 }
3511 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3512 pmcsr);
3513
3514 /* No more memory access after this point until the
3515 * device is brought back to D0.
3516 */
3517 udelay(50);
3518 break;
3519 }
3520 default:
3521 return -EINVAL;
3522 }
3523 return 0;
3524 }
3525
3526 static int
3527 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3528 {
3529 u32 val;
3530 int j;
3531
3532 /* Request access to the flash interface. */
3533 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3534 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3535 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3536 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3537 break;
3538
3539 udelay(5);
3540 }
3541
3542 if (j >= NVRAM_TIMEOUT_COUNT)
3543 return -EBUSY;
3544
3545 return 0;
3546 }
3547
3548 static int
3549 bnx2_release_nvram_lock(struct bnx2 *bp)
3550 {
3551 int j;
3552 u32 val;
3553
3554 /* Relinquish nvram interface. */
3555 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3556
3557 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3558 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3559 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3560 break;
3561
3562 udelay(5);
3563 }
3564
3565 if (j >= NVRAM_TIMEOUT_COUNT)
3566 return -EBUSY;
3567
3568 return 0;
3569 }
3570
3571
3572 static int
3573 bnx2_enable_nvram_write(struct bnx2 *bp)
3574 {
3575 u32 val;
3576
3577 val = REG_RD(bp, BNX2_MISC_CFG);
3578 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3579
3580 if (bp->flash_info->flags & BNX2_NV_WREN) {
3581 int j;
3582
3583 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584 REG_WR(bp, BNX2_NVM_COMMAND,
3585 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3586
3587 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3588 udelay(5);
3589
3590 val = REG_RD(bp, BNX2_NVM_COMMAND);
3591 if (val & BNX2_NVM_COMMAND_DONE)
3592 break;
3593 }
3594
3595 if (j >= NVRAM_TIMEOUT_COUNT)
3596 return -EBUSY;
3597 }
3598 return 0;
3599 }
3600
3601 static void
3602 bnx2_disable_nvram_write(struct bnx2 *bp)
3603 {
3604 u32 val;
3605
3606 val = REG_RD(bp, BNX2_MISC_CFG);
3607 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3608 }
3609
3610
3611 static void
3612 bnx2_enable_nvram_access(struct bnx2 *bp)
3613 {
3614 u32 val;
3615
3616 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3617 /* Enable both bits, even on read. */
3618 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3619 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3620 }
3621
3622 static void
3623 bnx2_disable_nvram_access(struct bnx2 *bp)
3624 {
3625 u32 val;
3626
3627 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3628 /* Disable both bits, even after read. */
3629 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3630 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3631 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3632 }
3633
3634 static int
3635 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3636 {
3637 u32 cmd;
3638 int j;
3639
3640 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3641 /* Buffered flash, no erase needed */
3642 return 0;
3643
3644 /* Build an erase command */
3645 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3646 BNX2_NVM_COMMAND_DOIT;
3647
3648 /* Need to clear DONE bit separately. */
3649 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3650
3651 /* Address of the NVRAM page to erase. */
3652 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3653
3654 /* Issue an erase command. */
3655 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3656
3657 /* Wait for completion. */
3658 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3659 u32 val;
3660
3661 udelay(5);
3662
3663 val = REG_RD(bp, BNX2_NVM_COMMAND);
3664 if (val & BNX2_NVM_COMMAND_DONE)
3665 break;
3666 }
3667
3668 if (j >= NVRAM_TIMEOUT_COUNT)
3669 return -EBUSY;
3670
3671 return 0;
3672 }
3673
3674 static int
3675 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3676 {
3677 u32 cmd;
3678 int j;
3679
3680 /* Build the command word. */
3681 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3682
3683 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3684 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3685 offset = ((offset / bp->flash_info->page_size) <<
3686 bp->flash_info->page_bits) +
3687 (offset % bp->flash_info->page_size);
3688 }
3689
3690 /* Need to clear DONE bit separately. */
3691 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3692
3693 /* Address of the NVRAM to read from. */
3694 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3695
3696 /* Issue a read command. */
3697 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3698
3699 /* Wait for completion. */
3700 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3701 u32 val;
3702
3703 udelay(5);
3704
3705 val = REG_RD(bp, BNX2_NVM_COMMAND);
3706 if (val & BNX2_NVM_COMMAND_DONE) {
3707 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3708 memcpy(ret_val, &v, 4);
3709 break;
3710 }
3711 }
3712 if (j >= NVRAM_TIMEOUT_COUNT)
3713 return -EBUSY;
3714
3715 return 0;
3716 }
3717
3718
3719 static int
3720 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3721 {
3722 u32 cmd;
3723 __be32 val32;
3724 int j;
3725
3726 /* Build the command word. */
3727 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3728
3729 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3730 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3731 offset = ((offset / bp->flash_info->page_size) <<
3732 bp->flash_info->page_bits) +
3733 (offset % bp->flash_info->page_size);
3734 }
3735
3736 /* Need to clear DONE bit separately. */
3737 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3738
3739 memcpy(&val32, val, 4);
3740
3741 /* Write the data. */
3742 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3743
3744 /* Address of the NVRAM to write to. */
3745 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3746
3747 /* Issue the write command. */
3748 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3749
3750 /* Wait for completion. */
3751 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3752 udelay(5);
3753
3754 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3755 break;
3756 }
3757 if (j >= NVRAM_TIMEOUT_COUNT)
3758 return -EBUSY;
3759
3760 return 0;
3761 }
3762
3763 static int
3764 bnx2_init_nvram(struct bnx2 *bp)
3765 {
3766 u32 val;
3767 int j, entry_count, rc = 0;
3768 struct flash_spec *flash;
3769
3770 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3771 bp->flash_info = &flash_5709;
3772 goto get_flash_size;
3773 }
3774
3775 /* Determine the selected interface. */
3776 val = REG_RD(bp, BNX2_NVM_CFG1);
3777
3778 entry_count = ARRAY_SIZE(flash_table);
3779
3780 if (val & 0x40000000) {
3781
3782 /* Flash interface has been reconfigured */
3783 for (j = 0, flash = &flash_table[0]; j < entry_count;
3784 j++, flash++) {
3785 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3786 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3787 bp->flash_info = flash;
3788 break;
3789 }
3790 }
3791 }
3792 else {
3793 u32 mask;
3794 /* Flash interface has not yet been reconfigured */
3795
3796 if (val & (1 << 23))
3797 mask = FLASH_BACKUP_STRAP_MASK;
3798 else
3799 mask = FLASH_STRAP_MASK;
3800
3801 for (j = 0, flash = &flash_table[0]; j < entry_count;
3802 j++, flash++) {
3803
3804 if ((val & mask) == (flash->strapping & mask)) {
3805 bp->flash_info = flash;
3806
3807 /* Request access to the flash interface. */
3808 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3809 return rc;
3810
3811 /* Enable access to flash interface */
3812 bnx2_enable_nvram_access(bp);
3813
3814 /* Reconfigure the flash interface */
3815 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3816 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3817 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3818 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3819
3820 /* Disable access to flash interface */
3821 bnx2_disable_nvram_access(bp);
3822 bnx2_release_nvram_lock(bp);
3823
3824 break;
3825 }
3826 }
3827 } /* if (val & 0x40000000) */
3828
3829 if (j == entry_count) {
3830 bp->flash_info = NULL;
3831 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3832 return -ENODEV;
3833 }
3834
3835 get_flash_size:
3836 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3837 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3838 if (val)
3839 bp->flash_size = val;
3840 else
3841 bp->flash_size = bp->flash_info->total_size;
3842
3843 return rc;
3844 }
3845
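/* Read an arbitrary byte range from NVRAM.  The flash is accessed
 * one dword at a time, so an unaligned head or tail is read through
 * a bounce buffer and only the requested bytes are copied out.
 */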
3846 static int
3847 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3848 int buf_size)
3849 {
3850 int rc = 0;
3851 u32 cmd_flags, offset32, len32, extra;
3852
3853 if (buf_size == 0)
3854 return 0;
3855
3856 /* Request access to the flash interface. */
3857 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3858 return rc;
3859
3860 /* Enable access to flash interface */
3861 bnx2_enable_nvram_access(bp);
3862
3863 len32 = buf_size;
3864 offset32 = offset;
3865 extra = 0;
3866
3867 cmd_flags = 0;
3868
3869 if (offset32 & 3) {
3870 u8 buf[4];
3871 u32 pre_len;
3872
3873 offset32 &= ~3;
3874 pre_len = 4 - (offset & 3);
3875
3876 if (pre_len >= len32) {
3877 pre_len = len32;
3878 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3879 BNX2_NVM_COMMAND_LAST;
3880 }
3881 else {
3882 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3883 }
3884
3885 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3886
3887 if (rc)
3888 return rc;
3889
3890 memcpy(ret_buf, buf + (offset & 3), pre_len);
3891
3892 offset32 += 4;
3893 ret_buf += pre_len;
3894 len32 -= pre_len;
3895 }
3896 if (len32 & 3) {
3897 extra = 4 - (len32 & 3);
3898 len32 = (len32 + 4) & ~3;
3899 }
3900
3901 if (len32 == 4) {
3902 u8 buf[4];
3903
3904 if (cmd_flags)
3905 cmd_flags = BNX2_NVM_COMMAND_LAST;
3906 else
3907 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3908 BNX2_NVM_COMMAND_LAST;
3909
3910 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3911
3912 memcpy(ret_buf, buf, 4 - extra);
3913 }
3914 else if (len32 > 0) {
3915 u8 buf[4];
3916
3917 /* Read the first word. */
3918 if (cmd_flags)
3919 cmd_flags = 0;
3920 else
3921 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3922
3923 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3924
3925 /* Advance to the next dword. */
3926 offset32 += 4;
3927 ret_buf += 4;
3928 len32 -= 4;
3929
3930 while (len32 > 4 && rc == 0) {
3931 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3932
3933 /* Advance to the next dword. */
3934 offset32 += 4;
3935 ret_buf += 4;
3936 len32 -= 4;
3937 }
3938
3939 if (rc)
3940 return rc;
3941
3942 cmd_flags = BNX2_NVM_COMMAND_LAST;
3943 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3944
3945 memcpy(ret_buf, buf, 4 - extra);
3946 }
3947
3948 /* Disable access to flash interface */
3949 bnx2_disable_nvram_access(bp);
3950
3951 bnx2_release_nvram_lock(bp);
3952
3953 return rc;
3954 }
3955
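/* Write an arbitrary byte range to NVRAM.  Unaligned edges are
 * merged with the existing flash contents, and on non-buffered
 * flash each affected page is read, erased, and rewritten in full.
 */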
3956 static int
3957 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3958 int buf_size)
3959 {
3960 u32 written, offset32, len32;
3961 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3962 int rc = 0;
3963 int align_start, align_end;
3964
3965 buf = data_buf;
3966 offset32 = offset;
3967 len32 = buf_size;
3968 align_start = align_end = 0;
3969
3970 if ((align_start = (offset32 & 3))) {
3971 offset32 &= ~3;
3972 len32 += align_start;
3973 if (len32 < 4)
3974 len32 = 4;
3975 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3976 return rc;
3977 }
3978
3979 if (len32 & 3) {
3980 align_end = 4 - (len32 & 3);
3981 len32 += align_end;
3982 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3983 return rc;
3984 }
3985
3986 if (align_start || align_end) {
3987 align_buf = kmalloc(len32, GFP_KERNEL);
3988 if (align_buf == NULL)
3989 return -ENOMEM;
3990 if (align_start) {
3991 memcpy(align_buf, start, 4);
3992 }
3993 if (align_end) {
3994 memcpy(align_buf + len32 - 4, end, 4);
3995 }
3996 memcpy(align_buf + align_start, data_buf, buf_size);
3997 buf = align_buf;
3998 }
3999
4000 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4001 flash_buffer = kmalloc(264, GFP_KERNEL);
4002 if (flash_buffer == NULL) {
4003 rc = -ENOMEM;
4004 goto nvram_write_end;
4005 }
4006 }
4007
4008 written = 0;
4009 while ((written < len32) && (rc == 0)) {
4010 u32 page_start, page_end, data_start, data_end;
4011 u32 addr, cmd_flags;
4012 int i;
4013
4014 /* Find the page_start addr */
4015 page_start = offset32 + written;
4016 page_start -= (page_start % bp->flash_info->page_size);
4017 /* Find the page_end addr */
4018 page_end = page_start + bp->flash_info->page_size;
4019 /* Find the data_start addr */
4020 data_start = (written == 0) ? offset32 : page_start;
4021 /* Find the data_end addr */
4022 data_end = (page_end > offset32 + len32) ?
4023 (offset32 + len32) : page_end;
4024
4025 /* Request access to the flash interface. */
4026 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4027 goto nvram_write_end;
4028
4029 /* Enable access to flash interface */
4030 bnx2_enable_nvram_access(bp);
4031
4032 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4033 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4034 int j;
4035
4036 /* Read the whole page into the buffer
4037 * (non-buffered flash only) */
4038 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4039 if (j == (bp->flash_info->page_size - 4)) {
4040 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4041 }
4042 rc = bnx2_nvram_read_dword(bp,
4043 page_start + j,
4044 &flash_buffer[j],
4045 cmd_flags);
4046
4047 if (rc)
4048 goto nvram_write_end;
4049
4050 cmd_flags = 0;
4051 }
4052 }
4053
4054 /* Enable writes to flash interface (unlock write-protect) */
4055 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4056 goto nvram_write_end;
4057
4058 /* Loop to write back the buffer data from page_start to
4059 * data_start */
4060 i = 0;
4061 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4062 /* Erase the page */
4063 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4064 goto nvram_write_end;
4065
4066 /* Re-enable the write again for the actual write */
4067 bnx2_enable_nvram_write(bp);
4068
4069 for (addr = page_start; addr < data_start;
4070 addr += 4, i += 4) {
4071
4072 rc = bnx2_nvram_write_dword(bp, addr,
4073 &flash_buffer[i], cmd_flags);
4074
4075 if (rc != 0)
4076 goto nvram_write_end;
4077
4078 cmd_flags = 0;
4079 }
4080 }
4081
4082 /* Loop to write the new data from data_start to data_end */
4083 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4084 if ((addr == page_end - 4) ||
4085 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4086 (addr == data_end - 4))) {
4087
4088 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4089 }
4090 rc = bnx2_nvram_write_dword(bp, addr, buf,
4091 cmd_flags);
4092
4093 if (rc != 0)
4094 goto nvram_write_end;
4095
4096 cmd_flags = 0;
4097 buf += 4;
4098 }
4099
4100 /* Loop to write back the buffer data from data_end
4101 * to page_end */
4102 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4103 for (addr = data_end; addr < page_end;
4104 addr += 4, i += 4) {
4105
4106 if (addr == page_end-4) {
4107 cmd_flags = BNX2_NVM_COMMAND_LAST;
4108 }
4109 rc = bnx2_nvram_write_dword(bp, addr,
4110 &flash_buffer[i], cmd_flags);
4111
4112 if (rc != 0)
4113 goto nvram_write_end;
4114
4115 cmd_flags = 0;
4116 }
4117 }
4118
4119 /* Disable writes to flash interface (lock write-protect) */
4120 bnx2_disable_nvram_write(bp);
4121
4122 /* Disable access to flash interface */
4123 bnx2_disable_nvram_access(bp);
4124 bnx2_release_nvram_lock(bp);
4125
4126 /* Increment written */
4127 written += data_end - data_start;
4128 }
4129
4130 nvram_write_end:
4131 kfree(flash_buffer);
4132 kfree(align_buf);
4133 return rc;
4134 }
4135
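/* Check whether the bootcode manages the SerDes PHY on our behalf.  If the
 * firmware advertises remote PHY capability, take the current port type and
 * link state from shared memory and acknowledge the capability. */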
4136 static void
4137 bnx2_init_remote_phy(struct bnx2 *bp)
4138 {
4139 u32 val;
4140
4141 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4142 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4143 return;
4144
4145 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4146 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4147 return;
4148
4149 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4150 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4151
4152 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4153 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4154 bp->phy_port = PORT_FIBRE;
4155 else
4156 bp->phy_port = PORT_TP;
4157
4158 if (netif_running(bp->dev)) {
4159 u32 sig;
4160
4161 if (val & BNX2_LINK_STATUS_LINK_UP) {
4162 bp->link_up = 1;
4163 netif_carrier_on(bp->dev);
4164 } else {
4165 bp->link_up = 0;
4166 netif_carrier_off(bp->dev);
4167 }
4168 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4169 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4170 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4171 sig);
4172 }
4173 }
4174 }
4175
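/* Switch the GRC windows to separate mode and map window 2 and window 3 to
 * the MSI-X vector table and PBA. */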
4176 static void
4177 bnx2_setup_msix_tbl(struct bnx2 *bp)
4178 {
4179 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4180
4181 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4182 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4183 }
4184
4185 static int
4186 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4187 {
4188 u32 val;
4189 int i, rc = 0;
4190 u8 old_port;
4191
4192 /* Wait for the current PCI transaction to complete before
4193 * issuing a reset. */
4194 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4195 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4196 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4197 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4198 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4199 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4200 udelay(5);
4201
4202 /* Wait for the firmware to tell us it is ok to issue a reset. */
4203 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4204
4205 /* Deposit a driver reset signature so the firmware knows that
4206 * this is a soft reset. */
4207 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4208 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4209
4210 	/* Do a dummy read to force the chip to complete all current transactions
4211 * before we issue a reset. */
4212 val = REG_RD(bp, BNX2_MISC_ID);
4213
4214 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4215 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4216 REG_RD(bp, BNX2_MISC_COMMAND);
4217 udelay(5);
4218
4219 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4220 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4221
4222 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4223
4224 } else {
4225 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4226 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4227 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4228
4229 /* Chip reset. */
4230 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4231
4232 /* Reading back any register after chip reset will hang the
4233 * bus on 5706 A0 and A1. The msleep below provides plenty
4234 * of margin for write posting.
4235 */
4236 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4237 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4238 msleep(20);
4239
4240 		/* Reset takes approximately 30 usec */
4241 for (i = 0; i < 10; i++) {
4242 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4243 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4244 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4245 break;
4246 udelay(10);
4247 }
4248
4249 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4250 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4251 printk(KERN_ERR PFX "Chip reset did not complete\n");
4252 return -EBUSY;
4253 }
4254 }
4255
4256 /* Make sure byte swapping is properly configured. */
4257 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4258 if (val != 0x01020304) {
4259 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4260 return -ENODEV;
4261 }
4262
4263 /* Wait for the firmware to finish its initialization. */
4264 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4265 if (rc)
4266 return rc;
4267
4268 spin_lock_bh(&bp->phy_lock);
4269 old_port = bp->phy_port;
4270 bnx2_init_remote_phy(bp);
4271 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4272 old_port != bp->phy_port)
4273 bnx2_set_default_remote_link(bp);
4274 spin_unlock_bh(&bp->phy_lock);
4275
4276 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4277 		/* Adjust the voltage regulator two steps lower.  The default
4278 * of this register is 0x0000000e. */
4279 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4280
4281 /* Remove bad rbuf memory from the free pool. */
4282 rc = bnx2_alloc_bad_rbuf(bp);
4283 }
4284
4285 if (bp->flags & BNX2_FLAG_USING_MSIX)
4286 bnx2_setup_msix_tbl(bp);
4287
4288 return rc;
4289 }
4290
4291 static int
4292 bnx2_init_chip(struct bnx2 *bp)
4293 {
4294 u32 val;
4295 int rc, i;
4296
4297 /* Make sure the interrupt is not active. */
4298 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4299
4300 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4301 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4302 #ifdef __BIG_ENDIAN
4303 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4304 #endif
4305 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4306 DMA_READ_CHANS << 12 |
4307 DMA_WRITE_CHANS << 16;
4308
4309 val |= (0x2 << 20) | (1 << 11);
4310
4311 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4312 val |= (1 << 23);
4313
4314 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4315 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4316 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4317
4318 REG_WR(bp, BNX2_DMA_CONFIG, val);
4319
4320 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4321 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4322 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4323 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4324 }
4325
4326 if (bp->flags & BNX2_FLAG_PCIX) {
4327 u16 val16;
4328
4329 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4330 &val16);
4331 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4332 val16 & ~PCI_X_CMD_ERO);
4333 }
4334
4335 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4336 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4337 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4338 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4339
4340 /* Initialize context mapping and zero out the quick contexts. The
4341 * context block must have already been enabled. */
4342 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4343 rc = bnx2_init_5709_context(bp);
4344 if (rc)
4345 return rc;
4346 } else
4347 bnx2_init_context(bp);
4348
4349 if ((rc = bnx2_init_cpus(bp)) != 0)
4350 return rc;
4351
4352 bnx2_init_nvram(bp);
4353
4354 bnx2_set_mac_addr(bp);
4355
4356 val = REG_RD(bp, BNX2_MQ_CONFIG);
4357 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4358 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4359 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4360 val |= BNX2_MQ_CONFIG_HALT_DIS;
4361
4362 REG_WR(bp, BNX2_MQ_CONFIG, val);
4363
4364 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4365 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4366 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4367
4368 val = (BCM_PAGE_BITS - 8) << 24;
4369 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4370
4371 /* Configure page size. */
4372 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4373 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4374 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4375 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4376
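	/* Seed the transmit backoff random number generator from the MAC
	 * address. */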
4377 val = bp->mac_addr[0] +
4378 (bp->mac_addr[1] << 8) +
4379 (bp->mac_addr[2] << 16) +
4380 bp->mac_addr[3] +
4381 (bp->mac_addr[4] << 8) +
4382 (bp->mac_addr[5] << 16);
4383 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4384
4385 /* Program the MTU. Also include 4 bytes for CRC32. */
4386 val = bp->dev->mtu + ETH_HLEN + 4;
4387 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4388 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4389 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4390
4391 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4392 bp->bnx2_napi[i].last_status_idx = 0;
4393
4394 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4395
4396 /* Set up how to generate a link change interrupt. */
4397 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4398
4399 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4400 (u64) bp->status_blk_mapping & 0xffffffff);
4401 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4402
4403 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4404 (u64) bp->stats_blk_mapping & 0xffffffff);
4405 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4406 (u64) bp->stats_blk_mapping >> 32);
4407
4408 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4409 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4410
4411 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4412 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4413
4414 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4415 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4416
4417 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4418
4419 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4420
4421 REG_WR(bp, BNX2_HC_COM_TICKS,
4422 (bp->com_ticks_int << 16) | bp->com_ticks);
4423
4424 REG_WR(bp, BNX2_HC_CMD_TICKS,
4425 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4426
4427 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4428 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4429 else
4430 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4431 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4432
4433 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4434 val = BNX2_HC_CONFIG_COLLECT_STATS;
4435 else {
4436 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4437 BNX2_HC_CONFIG_COLLECT_STATS;
4438 }
4439
4440 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4441 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4442 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4443
4444 REG_WR(bp, BNX2_HC_SB_CONFIG_1,
4445 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4446 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4447
4448 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
4449 (bp->tx_quick_cons_trip_int << 16) |
4450 bp->tx_quick_cons_trip);
4451
4452 REG_WR(bp, BNX2_HC_TX_TICKS_1,
4453 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4454
4455 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4456 }
4457
4458 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4459 val |= BNX2_HC_CONFIG_ONE_SHOT;
4460
4461 REG_WR(bp, BNX2_HC_CONFIG, val);
4462
4463 /* Clear internal stats counters. */
4464 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4465
4466 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4467
4468 /* Initialize the receive filter. */
4469 bnx2_set_rx_mode(bp->dev);
4470
4471 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4472 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4473 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4474 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4475 }
4476 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4477 0);
4478
4479 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4480 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4481
4482 udelay(20);
4483
4484 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4485
4486 return rc;
4487 }
4488
4489 static void
4490 bnx2_clear_ring_states(struct bnx2 *bp)
4491 {
4492 struct bnx2_napi *bnapi;
4493 int i;
4494
4495 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4496 bnapi = &bp->bnx2_napi[i];
4497
4498 bnapi->tx_cons = 0;
4499 bnapi->hw_tx_cons = 0;
4500 bnapi->rx_prod_bseq = 0;
4501 bnapi->rx_prod = 0;
4502 bnapi->rx_cons = 0;
4503 bnapi->rx_pg_prod = 0;
4504 bnapi->rx_pg_cons = 0;
4505 }
4506 }
4507
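/* Program the L2 TX context for the given CID: context and command type plus
 * the host address of the TX descriptor chain.  The 5709 uses a different
 * set of context offsets (the _XI variants). */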
4508 static void
4509 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4510 {
4511 u32 val, offset0, offset1, offset2, offset3;
4512
4513 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4514 offset0 = BNX2_L2CTX_TYPE_XI;
4515 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4516 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4517 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4518 } else {
4519 offset0 = BNX2_L2CTX_TYPE;
4520 offset1 = BNX2_L2CTX_CMD_TYPE;
4521 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4522 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4523 }
4524 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4525 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4526
4527 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4528 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4529
4530 val = (u64) bp->tx_desc_mapping >> 32;
4531 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4532
4533 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4534 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4535 }
4536
4537 static void
4538 bnx2_init_tx_ring(struct bnx2 *bp)
4539 {
4540 struct tx_bd *txbd;
4541 u32 cid = TX_CID;
4542 struct bnx2_napi *bnapi;
4543
4544 bp->tx_vec = 0;
4545 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4546 cid = TX_TSS_CID;
4547 bp->tx_vec = BNX2_TX_VEC;
4548 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4549 (TX_TSS_CID << 7));
4550 }
4551 bnapi = &bp->bnx2_napi[bp->tx_vec];
4552
4553 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4554
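	/* The last BD in the ring points back to the start of the ring, so
	 * the hardware sees a circular descriptor chain. */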
4555 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4556
4557 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4558 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4559
4560 bp->tx_prod = 0;
4561 bp->tx_prod_bseq = 0;
4562
4563 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4564 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4565
4566 bnx2_init_tx_context(bp, cid);
4567 }
4568
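/* Initialize one or more pages of RX buffer descriptors.  Every BD gets the
 * buffer size and START/END flags; the last BD of each page chains to the
 * next page, and the final page chains back to the first. */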
4569 static void
4570 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4571 int num_rings)
4572 {
4573 int i;
4574 struct rx_bd *rxbd;
4575
4576 for (i = 0; i < num_rings; i++) {
4577 int j;
4578
4579 rxbd = &rx_ring[i][0];
4580 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4581 rxbd->rx_bd_len = buf_size;
4582 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4583 }
4584 if (i == (num_rings - 1))
4585 j = 0;
4586 else
4587 j = i + 1;
4588 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4589 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4590 }
4591 }
4592
4593 static void
4594 bnx2_init_rx_ring(struct bnx2 *bp)
4595 {
4596 int i;
4597 u16 prod, ring_prod;
4598 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4599 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4600
4601 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4602 bp->rx_buf_use_size, bp->rx_max_ring);
4603
4604 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4605 if (bp->rx_pg_ring_size) {
4606 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4607 bp->rx_pg_desc_mapping,
4608 PAGE_SIZE, bp->rx_max_pg_ring);
4609 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4610 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4611 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4612 BNX2_L2CTX_RBDC_JUMBO_KEY);
4613
4614 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4615 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4616
4617 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4618 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4619
4620 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4621 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4622 }
4623
4624 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4625 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4626 val |= 0x02 << 8;
4627 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4628
4629 val = (u64) bp->rx_desc_mapping[0] >> 32;
4630 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4631
4632 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4633 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4634
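	/* Pre-fill the page ring and the RX buffer ring with as many buffers
	 * as can be allocated, then publish the producer indices and byte
	 * sequence to the chip below. */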
4635 ring_prod = prod = bnapi->rx_pg_prod;
4636 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4637 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4638 break;
4639 prod = NEXT_RX_BD(prod);
4640 ring_prod = RX_PG_RING_IDX(prod);
4641 }
4642 bnapi->rx_pg_prod = prod;
4643
4644 ring_prod = prod = bnapi->rx_prod;
4645 for (i = 0; i < bp->rx_ring_size; i++) {
4646 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4647 break;
4648 }
4649 prod = NEXT_RX_BD(prod);
4650 ring_prod = RX_RING_IDX(prod);
4651 }
4652 bnapi->rx_prod = prod;
4653
4654 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4655 bnapi->rx_pg_prod);
4656 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4657
4658 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4659 }
4660
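/* Return the number of ring pages needed to hold ring_size descriptors,
 * rounded up to the next power of two.  max_size is the largest page count
 * supported by the ring. */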
4661 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4662 {
4663 u32 max, num_rings = 1;
4664
4665 while (ring_size > MAX_RX_DESC_CNT) {
4666 ring_size -= MAX_RX_DESC_CNT;
4667 num_rings++;
4668 }
4669 /* round to next power of 2 */
4670 max = max_size;
4671 while ((max & num_rings) == 0)
4672 max >>= 1;
4673
4674 if (num_rings != max)
4675 max <<= 1;
4676
4677 return max;
4678 }
4679
4680 static void
4681 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4682 {
4683 u32 rx_size, rx_space, jumbo_size;
4684
4685 /* 8 for CRC and VLAN */
4686 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4687
4688 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4689 sizeof(struct skb_shared_info);
4690
4691 bp->rx_copy_thresh = RX_COPY_THRESH;
4692 bp->rx_pg_ring_size = 0;
4693 bp->rx_max_pg_ring = 0;
4694 bp->rx_max_pg_ring_idx = 0;
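	/* If a full frame plus skb overhead will not fit in a single page,
	 * split reception: small packets and headers go into the normal RX
	 * ring, and the rest of the frame is placed into the page ring. */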
4695 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4696 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4697
4698 jumbo_size = size * pages;
4699 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4700 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4701
4702 bp->rx_pg_ring_size = jumbo_size;
4703 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4704 MAX_RX_PG_RINGS);
4705 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4706 rx_size = RX_COPY_THRESH + bp->rx_offset;
4707 bp->rx_copy_thresh = 0;
4708 }
4709
4710 bp->rx_buf_use_size = rx_size;
4711 /* hw alignment */
4712 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4713 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4714 bp->rx_ring_size = size;
4715 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4716 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4717 }
4718
4719 static void
4720 bnx2_free_tx_skbs(struct bnx2 *bp)
4721 {
4722 int i;
4723
4724 if (bp->tx_buf_ring == NULL)
4725 return;
4726
4727 for (i = 0; i < TX_DESC_CNT; ) {
4728 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4729 struct sk_buff *skb = tx_buf->skb;
4730 int j, last;
4731
4732 if (skb == NULL) {
4733 i++;
4734 continue;
4735 }
4736
4737 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4738 skb_headlen(skb), PCI_DMA_TODEVICE);
4739
4740 tx_buf->skb = NULL;
4741
4742 last = skb_shinfo(skb)->nr_frags;
4743 for (j = 0; j < last; j++) {
4744 tx_buf = &bp->tx_buf_ring[i + j + 1];
4745 pci_unmap_page(bp->pdev,
4746 pci_unmap_addr(tx_buf, mapping),
4747 skb_shinfo(skb)->frags[j].size,
4748 PCI_DMA_TODEVICE);
4749 }
4750 dev_kfree_skb(skb);
4751 i += j + 1;
4752 }
4753
4754 }
4755
4756 static void
4757 bnx2_free_rx_skbs(struct bnx2 *bp)
4758 {
4759 int i;
4760
4761 if (bp->rx_buf_ring == NULL)
4762 return;
4763
4764 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4765 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4766 struct sk_buff *skb = rx_buf->skb;
4767
4768 if (skb == NULL)
4769 continue;
4770
4771 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4772 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4773
4774 rx_buf->skb = NULL;
4775
4776 dev_kfree_skb(skb);
4777 }
4778 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4779 bnx2_free_rx_page(bp, i);
4780 }
4781
4782 static void
4783 bnx2_free_skbs(struct bnx2 *bp)
4784 {
4785 bnx2_free_tx_skbs(bp);
4786 bnx2_free_rx_skbs(bp);
4787 }
4788
4789 static int
4790 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4791 {
4792 int rc;
4793
4794 rc = bnx2_reset_chip(bp, reset_code);
4795 bnx2_free_skbs(bp);
4796 if (rc)
4797 return rc;
4798
4799 if ((rc = bnx2_init_chip(bp)) != 0)
4800 return rc;
4801
4802 bnx2_clear_ring_states(bp);
4803 bnx2_init_tx_ring(bp);
4804 bnx2_init_rx_ring(bp);
4805 return 0;
4806 }
4807
4808 static int
4809 bnx2_init_nic(struct bnx2 *bp)
4810 {
4811 int rc;
4812
4813 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4814 return rc;
4815
4816 spin_lock_bh(&bp->phy_lock);
4817 bnx2_init_phy(bp);
4818 bnx2_set_link(bp);
4819 spin_unlock_bh(&bp->phy_lock);
4820 return 0;
4821 }
4822
4823 static int
4824 bnx2_test_registers(struct bnx2 *bp)
4825 {
4826 int ret;
4827 int i, is_5709;
4828 static const struct {
4829 u16 offset;
4830 u16 flags;
4831 #define BNX2_FL_NOT_5709 1
4832 u32 rw_mask;
4833 u32 ro_mask;
4834 } reg_tbl[] = {
4835 { 0x006c, 0, 0x00000000, 0x0000003f },
4836 { 0x0090, 0, 0xffffffff, 0x00000000 },
4837 { 0x0094, 0, 0x00000000, 0x00000000 },
4838
4839 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4840 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4841 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4842 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4843 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4844 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4845 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4846 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4847 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4848
4849 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4850 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4851 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4852 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4853 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4854 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4855
4856 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4857 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4858 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4859
4860 { 0x1000, 0, 0x00000000, 0x00000001 },
4861 { 0x1004, 0, 0x00000000, 0x000f0001 },
4862
4863 { 0x1408, 0, 0x01c00800, 0x00000000 },
4864 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4865 { 0x14a8, 0, 0x00000000, 0x000001ff },
4866 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4867 { 0x14b0, 0, 0x00000002, 0x00000001 },
4868 { 0x14b8, 0, 0x00000000, 0x00000000 },
4869 { 0x14c0, 0, 0x00000000, 0x00000009 },
4870 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4871 { 0x14cc, 0, 0x00000000, 0x00000001 },
4872 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4873
4874 { 0x1800, 0, 0x00000000, 0x00000001 },
4875 { 0x1804, 0, 0x00000000, 0x00000003 },
4876
4877 { 0x2800, 0, 0x00000000, 0x00000001 },
4878 { 0x2804, 0, 0x00000000, 0x00003f01 },
4879 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4880 { 0x2810, 0, 0xffff0000, 0x00000000 },
4881 { 0x2814, 0, 0xffff0000, 0x00000000 },
4882 { 0x2818, 0, 0xffff0000, 0x00000000 },
4883 { 0x281c, 0, 0xffff0000, 0x00000000 },
4884 { 0x2834, 0, 0xffffffff, 0x00000000 },
4885 { 0x2840, 0, 0x00000000, 0xffffffff },
4886 { 0x2844, 0, 0x00000000, 0xffffffff },
4887 { 0x2848, 0, 0xffffffff, 0x00000000 },
4888 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4889
4890 { 0x2c00, 0, 0x00000000, 0x00000011 },
4891 { 0x2c04, 0, 0x00000000, 0x00030007 },
4892
4893 { 0x3c00, 0, 0x00000000, 0x00000001 },
4894 { 0x3c04, 0, 0x00000000, 0x00070000 },
4895 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4896 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4897 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4898 { 0x3c14, 0, 0x00000000, 0xffffffff },
4899 { 0x3c18, 0, 0x00000000, 0xffffffff },
4900 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4901 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4902
4903 { 0x5004, 0, 0x00000000, 0x0000007f },
4904 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4905
4906 { 0x5c00, 0, 0x00000000, 0x00000001 },
4907 { 0x5c04, 0, 0x00000000, 0x0003000f },
4908 { 0x5c08, 0, 0x00000003, 0x00000000 },
4909 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4910 { 0x5c10, 0, 0x00000000, 0xffffffff },
4911 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4912 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4913 { 0x5c88, 0, 0x00000000, 0x00077373 },
4914 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4915
4916 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4917 { 0x680c, 0, 0xffffffff, 0x00000000 },
4918 { 0x6810, 0, 0xffffffff, 0x00000000 },
4919 { 0x6814, 0, 0xffffffff, 0x00000000 },
4920 { 0x6818, 0, 0xffffffff, 0x00000000 },
4921 { 0x681c, 0, 0xffffffff, 0x00000000 },
4922 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4923 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4924 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4925 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4926 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4927 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4928 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4929 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4930 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4931 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4932 { 0x684c, 0, 0xffffffff, 0x00000000 },
4933 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4934 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4935 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4936 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4937 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4938 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4939
4940 { 0xffff, 0, 0x00000000, 0x00000000 },
4941 };
4942
4943 ret = 0;
4944 is_5709 = 0;
4945 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4946 is_5709 = 1;
4947
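	/* For each register, write all zeros and then all ones and read the
	 * value back: bits in rw_mask must follow the written value, while
	 * bits in ro_mask must keep their original value. */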
4948 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4949 u32 offset, rw_mask, ro_mask, save_val, val;
4950 u16 flags = reg_tbl[i].flags;
4951
4952 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4953 continue;
4954
4955 offset = (u32) reg_tbl[i].offset;
4956 rw_mask = reg_tbl[i].rw_mask;
4957 ro_mask = reg_tbl[i].ro_mask;
4958
4959 save_val = readl(bp->regview + offset);
4960
4961 writel(0, bp->regview + offset);
4962
4963 val = readl(bp->regview + offset);
4964 if ((val & rw_mask) != 0) {
4965 goto reg_test_err;
4966 }
4967
4968 if ((val & ro_mask) != (save_val & ro_mask)) {
4969 goto reg_test_err;
4970 }
4971
4972 writel(0xffffffff, bp->regview + offset);
4973
4974 val = readl(bp->regview + offset);
4975 if ((val & rw_mask) != rw_mask) {
4976 goto reg_test_err;
4977 }
4978
4979 if ((val & ro_mask) != (save_val & ro_mask)) {
4980 goto reg_test_err;
4981 }
4982
4983 writel(save_val, bp->regview + offset);
4984 continue;
4985
4986 reg_test_err:
4987 writel(save_val, bp->regview + offset);
4988 ret = -ENODEV;
4989 break;
4990 }
4991 return ret;
4992 }
4993
4994 static int
4995 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4996 {
4997 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4998 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4999 int i;
5000
5001 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5002 u32 offset;
5003
5004 for (offset = 0; offset < size; offset += 4) {
5005
5006 REG_WR_IND(bp, start + offset, test_pattern[i]);
5007
5008 if (REG_RD_IND(bp, start + offset) !=
5009 test_pattern[i]) {
5010 return -ENODEV;
5011 }
5012 }
5013 }
5014 return 0;
5015 }
5016
5017 static int
5018 bnx2_test_memory(struct bnx2 *bp)
5019 {
5020 int ret = 0;
5021 int i;
5022 static struct mem_entry {
5023 u32 offset;
5024 u32 len;
5025 } mem_tbl_5706[] = {
5026 { 0x60000, 0x4000 },
5027 { 0xa0000, 0x3000 },
5028 { 0xe0000, 0x4000 },
5029 { 0x120000, 0x4000 },
5030 { 0x1a0000, 0x4000 },
5031 { 0x160000, 0x4000 },
5032 { 0xffffffff, 0 },
5033 },
5034 mem_tbl_5709[] = {
5035 { 0x60000, 0x4000 },
5036 { 0xa0000, 0x3000 },
5037 { 0xe0000, 0x4000 },
5038 { 0x120000, 0x4000 },
5039 { 0x1a0000, 0x4000 },
5040 { 0xffffffff, 0 },
5041 };
5042 struct mem_entry *mem_tbl;
5043
5044 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5045 mem_tbl = mem_tbl_5709;
5046 else
5047 mem_tbl = mem_tbl_5706;
5048
5049 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5050 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5051 mem_tbl[i].len)) != 0) {
5052 return ret;
5053 }
5054 }
5055
5056 return ret;
5057 }
5058
5059 #define BNX2_MAC_LOOPBACK 0
5060 #define BNX2_PHY_LOOPBACK 1
5061
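/* Loopback self-test: send one test packet to ourselves in MAC or PHY
 * loopback mode and verify that it is received without errors and with its
 * payload intact. */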
5062 static int
5063 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5064 {
5065 unsigned int pkt_size, num_pkts, i;
5066 struct sk_buff *skb, *rx_skb;
5067 unsigned char *packet;
5068 u16 rx_start_idx, rx_idx;
5069 dma_addr_t map;
5070 struct tx_bd *txbd;
5071 struct sw_bd *rx_buf;
5072 struct l2_fhdr *rx_hdr;
5073 int ret = -ENODEV;
5074 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5075
5076 tx_napi = bnapi;
5077 if (bp->flags & BNX2_FLAG_USING_MSIX)
5078 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5079
5080 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5081 bp->loopback = MAC_LOOPBACK;
5082 bnx2_set_mac_loopback(bp);
5083 }
5084 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5085 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5086 return 0;
5087
5088 bp->loopback = PHY_LOOPBACK;
5089 bnx2_set_phy_loopback(bp);
5090 }
5091 else
5092 return -EINVAL;
5093
5094 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5095 skb = netdev_alloc_skb(bp->dev, pkt_size);
5096 if (!skb)
5097 return -ENOMEM;
5098 packet = skb_put(skb, pkt_size);
5099 memcpy(packet, bp->dev->dev_addr, 6);
5100 memset(packet + 6, 0x0, 8);
5101 for (i = 14; i < pkt_size; i++)
5102 packet[i] = (unsigned char) (i & 0xff);
5103
5104 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5105 PCI_DMA_TODEVICE);
5106
5107 REG_WR(bp, BNX2_HC_COMMAND,
5108 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5109
5110 REG_RD(bp, BNX2_HC_COMMAND);
5111
5112 udelay(5);
5113 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5114
5115 num_pkts = 0;
5116
5117 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5118
5119 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5120 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5121 txbd->tx_bd_mss_nbytes = pkt_size;
5122 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5123
5124 num_pkts++;
5125 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5126 bp->tx_prod_bseq += pkt_size;
5127
5128 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5129 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5130
5131 udelay(100);
5132
5133 REG_WR(bp, BNX2_HC_COMMAND,
5134 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5135
5136 REG_RD(bp, BNX2_HC_COMMAND);
5137
5138 udelay(5);
5139
5140 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5141 dev_kfree_skb(skb);
5142
5143 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5144 goto loopback_test_done;
5145
5146 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5147 if (rx_idx != rx_start_idx + num_pkts) {
5148 goto loopback_test_done;
5149 }
5150
5151 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5152 rx_skb = rx_buf->skb;
5153
5154 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5155 skb_reserve(rx_skb, bp->rx_offset);
5156
5157 pci_dma_sync_single_for_cpu(bp->pdev,
5158 pci_unmap_addr(rx_buf, mapping),
5159 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5160
5161 if (rx_hdr->l2_fhdr_status &
5162 (L2_FHDR_ERRORS_BAD_CRC |
5163 L2_FHDR_ERRORS_PHY_DECODE |
5164 L2_FHDR_ERRORS_ALIGNMENT |
5165 L2_FHDR_ERRORS_TOO_SHORT |
5166 L2_FHDR_ERRORS_GIANT_FRAME)) {
5167
5168 goto loopback_test_done;
5169 }
5170
5171 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5172 goto loopback_test_done;
5173 }
5174
5175 for (i = 14; i < pkt_size; i++) {
5176 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5177 goto loopback_test_done;
5178 }
5179 }
5180
5181 ret = 0;
5182
5183 loopback_test_done:
5184 bp->loopback = 0;
5185 return ret;
5186 }
5187
5188 #define BNX2_MAC_LOOPBACK_FAILED 1
5189 #define BNX2_PHY_LOOPBACK_FAILED 2
5190 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5191 BNX2_PHY_LOOPBACK_FAILED)
5192
5193 static int
5194 bnx2_test_loopback(struct bnx2 *bp)
5195 {
5196 int rc = 0;
5197
5198 if (!netif_running(bp->dev))
5199 return BNX2_LOOPBACK_FAILED;
5200
5201 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5202 spin_lock_bh(&bp->phy_lock);
5203 bnx2_init_phy(bp);
5204 spin_unlock_bh(&bp->phy_lock);
5205 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5206 rc |= BNX2_MAC_LOOPBACK_FAILED;
5207 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5208 rc |= BNX2_PHY_LOOPBACK_FAILED;
5209 return rc;
5210 }
5211
5212 #define NVRAM_SIZE 0x200
5213 #define CRC32_RESIDUAL 0xdebb20e3
5214
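/* NVRAM self-test: check the signature dword at offset 0, then verify the
 * 512-byte block at offset 0x100.  Each 256-byte half carries its own CRC32,
 * so running the CRC over data plus checksum must leave the standard
 * residual. */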
5215 static int
5216 bnx2_test_nvram(struct bnx2 *bp)
5217 {
5218 __be32 buf[NVRAM_SIZE / 4];
5219 u8 *data = (u8 *) buf;
5220 int rc = 0;
5221 u32 magic, csum;
5222
5223 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5224 goto test_nvram_done;
5225
5226 magic = be32_to_cpu(buf[0]);
5227 if (magic != 0x669955aa) {
5228 rc = -ENODEV;
5229 goto test_nvram_done;
5230 }
5231
5232 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5233 goto test_nvram_done;
5234
5235 csum = ether_crc_le(0x100, data);
5236 if (csum != CRC32_RESIDUAL) {
5237 rc = -ENODEV;
5238 goto test_nvram_done;
5239 }
5240
5241 csum = ether_crc_le(0x100, data + 0x100);
5242 if (csum != CRC32_RESIDUAL) {
5243 rc = -ENODEV;
5244 }
5245
5246 test_nvram_done:
5247 return rc;
5248 }
5249
5250 static int
5251 bnx2_test_link(struct bnx2 *bp)
5252 {
5253 u32 bmsr;
5254
5255 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5256 if (bp->link_up)
5257 return 0;
5258 return -ENODEV;
5259 }
5260 spin_lock_bh(&bp->phy_lock);
5261 bnx2_enable_bmsr1(bp);
5262 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5263 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5264 bnx2_disable_bmsr1(bp);
5265 spin_unlock_bh(&bp->phy_lock);
5266
5267 if (bmsr & BMSR_LSTATUS) {
5268 return 0;
5269 }
5270 return -ENODEV;
5271 }
5272
5273 static int
5274 bnx2_test_intr(struct bnx2 *bp)
5275 {
5276 int i;
5277 u16 status_idx;
5278
5279 if (!netif_running(bp->dev))
5280 return -ENODEV;
5281
5282 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5283
5284 /* This register is not touched during run-time. */
5285 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5286 REG_RD(bp, BNX2_HC_COMMAND);
5287
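	/* Wait up to roughly 100 ms for the status block index to change,
	 * which indicates that the forced interrupt was actually delivered. */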
5288 for (i = 0; i < 10; i++) {
5289 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5290 status_idx) {
5291
5292 break;
5293 }
5294
5295 msleep_interruptible(10);
5296 }
5297 if (i < 10)
5298 return 0;
5299
5300 return -ENODEV;
5301 }
5302
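/* Return nonzero if the 5706 SerDes PHY sees a usable link partner: signal
 * detect asserted, receiver in sync, and not currently receiving the
 * autoneg CONFIG pattern. */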
5303 static int
5304 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5305 {
5306 u32 mode_ctl, an_dbg, exp;
5307
5308 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5309 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5310
5311 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5312 return 0;
5313
5314 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5315 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5316 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5317
5318 if (an_dbg & MISC_SHDW_AN_DBG_NOSYNC)
5319 return 0;
5320
5321 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5322 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5323 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5324
5325 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5326 return 0;
5327
5328 return 1;
5329 }
5330
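/* Periodic link state machine for the 5706 SerDes PHY.  If autoneg has not
 * brought the link up but a partner is detected, fall back to forced 1G full
 * duplex (parallel detect); switch back to autoneg if the partner appears to
 * be negotiating, and force the link down if the receiver loses sync. */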
5331 static void
5332 bnx2_5706_serdes_timer(struct bnx2 *bp)
5333 {
5334 int check_link = 1;
5335
5336 spin_lock(&bp->phy_lock);
5337 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
5338 bnx2_5706s_force_link_dn(bp, 0);
5339 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
5340 spin_unlock(&bp->phy_lock);
5341 return;
5342 }
5343
5344 if (bp->serdes_an_pending) {
5345 bp->serdes_an_pending--;
5346 check_link = 0;
5347 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5348 u32 bmcr;
5349
5350 bp->current_interval = bp->timer_interval;
5351
5352 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5353
5354 if (bmcr & BMCR_ANENABLE) {
5355 if (bnx2_5706_serdes_has_link(bp)) {
5356 bmcr &= ~BMCR_ANENABLE;
5357 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5358 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5359 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5360 }
5361 }
5362 }
5363 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5364 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5365 u32 phy2;
5366
5367 check_link = 0;
5368 bnx2_write_phy(bp, 0x17, 0x0f01);
5369 bnx2_read_phy(bp, 0x15, &phy2);
5370 if (phy2 & 0x20) {
5371 u32 bmcr;
5372
5373 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5374 bmcr |= BMCR_ANENABLE;
5375 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5376
5377 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5378 }
5379 } else
5380 bp->current_interval = bp->timer_interval;
5381
5382 if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) {
5383 u32 val;
5384
5385 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5386 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5387 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5388
5389 if (val & MISC_SHDW_AN_DBG_NOSYNC) {
5390 bnx2_5706s_force_link_dn(bp, 1);
5391 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5392 }
5393 }
5394 spin_unlock(&bp->phy_lock);
5395 }
5396
5397 static void
5398 bnx2_5708_serdes_timer(struct bnx2 *bp)
5399 {
5400 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5401 return;
5402
5403 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5404 bp->serdes_an_pending = 0;
5405 return;
5406 }
5407
5408 spin_lock(&bp->phy_lock);
5409 if (bp->serdes_an_pending)
5410 bp->serdes_an_pending--;
5411 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5412 u32 bmcr;
5413
5414 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5415 if (bmcr & BMCR_ANENABLE) {
5416 bnx2_enable_forced_2g5(bp);
5417 bp->current_interval = SERDES_FORCED_TIMEOUT;
5418 } else {
5419 bnx2_disable_forced_2g5(bp);
5420 bp->serdes_an_pending = 2;
5421 bp->current_interval = bp->timer_interval;
5422 }
5423
5424 } else
5425 bp->current_interval = bp->timer_interval;
5426
5427 spin_unlock(&bp->phy_lock);
5428 }
5429
5430 static void
5431 bnx2_timer(unsigned long data)
5432 {
5433 struct bnx2 *bp = (struct bnx2 *) data;
5434
5435 if (!netif_running(bp->dev))
5436 return;
5437
5438 if (atomic_read(&bp->intr_sem) != 0)
5439 goto bnx2_restart_timer;
5440
5441 bnx2_send_heart_beat(bp);
5442
5443 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5444
5445 	/* work around occasionally corrupted counters */
5446 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5447 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5448 BNX2_HC_COMMAND_STATS_NOW);
5449
5450 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5451 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5452 bnx2_5706_serdes_timer(bp);
5453 else
5454 bnx2_5708_serdes_timer(bp);
5455 }
5456
5457 bnx2_restart_timer:
5458 mod_timer(&bp->timer, jiffies + bp->current_interval);
5459 }
5460
5461 static int
5462 bnx2_request_irq(struct bnx2 *bp)
5463 {
5464 struct net_device *dev = bp->dev;
5465 unsigned long flags;
5466 struct bnx2_irq *irq;
5467 int rc = 0, i;
5468
5469 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5470 flags = 0;
5471 else
5472 flags = IRQF_SHARED;
5473
5474 for (i = 0; i < bp->irq_nvecs; i++) {
5475 irq = &bp->irq_tbl[i];
5476 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5477 dev);
5478 if (rc)
5479 break;
5480 irq->requested = 1;
5481 }
5482 return rc;
5483 }
5484
5485 static void
5486 bnx2_free_irq(struct bnx2 *bp)
5487 {
5488 struct net_device *dev = bp->dev;
5489 struct bnx2_irq *irq;
5490 int i;
5491
5492 for (i = 0; i < bp->irq_nvecs; i++) {
5493 irq = &bp->irq_tbl[i];
5494 if (irq->requested)
5495 free_irq(irq->vector, dev);
5496 irq->requested = 0;
5497 }
5498 if (bp->flags & BNX2_FLAG_USING_MSI)
5499 pci_disable_msi(bp->pdev);
5500 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5501 pci_disable_msix(bp->pdev);
5502
5503 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5504 }
5505
5506 static void
5507 bnx2_enable_msix(struct bnx2 *bp)
5508 {
5509 int i, rc;
5510 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5511
5512 bnx2_setup_msix_tbl(bp);
5513 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5514 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5515 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5516
5517 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5518 msix_ent[i].entry = i;
5519 msix_ent[i].vector = 0;
5520 }
5521
5522 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5523 if (rc != 0)
5524 return;
5525
5526 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5527 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5528
5529 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5530 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5531 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5532 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5533
5534 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5535 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5536 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5537 bp->irq_tbl[i].vector = msix_ent[i].vector;
5538 }
5539
5540 static void
5541 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5542 {
5543 bp->irq_tbl[0].handler = bnx2_interrupt;
5544 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5545 bp->irq_nvecs = 1;
5546 bp->irq_tbl[0].vector = bp->pdev->irq;
5547
5548 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5549 bnx2_enable_msix(bp);
5550
5551 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5552 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5553 if (pci_enable_msi(bp->pdev) == 0) {
5554 bp->flags |= BNX2_FLAG_USING_MSI;
5555 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5556 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5557 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5558 } else
5559 bp->irq_tbl[0].handler = bnx2_msi;
5560
5561 bp->irq_tbl[0].vector = bp->pdev->irq;
5562 }
5563 }
5564 }
5565
5566 /* Called with rtnl_lock */
5567 static int
5568 bnx2_open(struct net_device *dev)
5569 {
5570 struct bnx2 *bp = netdev_priv(dev);
5571 int rc;
5572
5573 netif_carrier_off(dev);
5574
5575 bnx2_set_power_state(bp, PCI_D0);
5576 bnx2_disable_int(bp);
5577
5578 rc = bnx2_alloc_mem(bp);
5579 if (rc)
5580 return rc;
5581
5582 bnx2_setup_int_mode(bp, disable_msi);
5583 bnx2_napi_enable(bp);
5584 rc = bnx2_request_irq(bp);
5585
5586 if (rc) {
5587 bnx2_napi_disable(bp);
5588 bnx2_free_mem(bp);
5589 return rc;
5590 }
5591
5592 rc = bnx2_init_nic(bp);
5593
5594 if (rc) {
5595 bnx2_napi_disable(bp);
5596 bnx2_free_irq(bp);
5597 bnx2_free_skbs(bp);
5598 bnx2_free_mem(bp);
5599 return rc;
5600 }
5601
5602 mod_timer(&bp->timer, jiffies + bp->current_interval);
5603
5604 atomic_set(&bp->intr_sem, 0);
5605
5606 bnx2_enable_int(bp);
5607
5608 if (bp->flags & BNX2_FLAG_USING_MSI) {
5609 		/* Test MSI to make sure it is working.
5610 		 * If the MSI test fails, go back to INTx mode.
5611 */
5612 if (bnx2_test_intr(bp) != 0) {
5613 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5614 " using MSI, switching to INTx mode. Please"
5615 " report this failure to the PCI maintainer"
5616 " and include system chipset information.\n",
5617 bp->dev->name);
5618
5619 bnx2_disable_int(bp);
5620 bnx2_free_irq(bp);
5621
5622 bnx2_setup_int_mode(bp, 1);
5623
5624 rc = bnx2_init_nic(bp);
5625
5626 if (!rc)
5627 rc = bnx2_request_irq(bp);
5628
5629 if (rc) {
5630 bnx2_napi_disable(bp);
5631 bnx2_free_skbs(bp);
5632 bnx2_free_mem(bp);
5633 del_timer_sync(&bp->timer);
5634 return rc;
5635 }
5636 bnx2_enable_int(bp);
5637 }
5638 }
5639 if (bp->flags & BNX2_FLAG_USING_MSI)
5640 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5641 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5642 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5643
5644 netif_start_queue(dev);
5645
5646 return 0;
5647 }
5648
5649 static void
5650 bnx2_reset_task(struct work_struct *work)
5651 {
5652 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5653
5654 if (!netif_running(bp->dev))
5655 return;
5656
5657 bp->in_reset_task = 1;
5658 bnx2_netif_stop(bp);
5659
5660 bnx2_init_nic(bp);
5661
5662 atomic_set(&bp->intr_sem, 1);
5663 bnx2_netif_start(bp);
5664 bp->in_reset_task = 0;
5665 }
5666
5667 static void
5668 bnx2_tx_timeout(struct net_device *dev)
5669 {
5670 struct bnx2 *bp = netdev_priv(dev);
5671
5672 	/* This allows the netif to be shut down gracefully before resetting */
5673 schedule_work(&bp->reset_task);
5674 }
5675
5676 #ifdef BCM_VLAN
5677 /* Called with rtnl_lock */
5678 static void
5679 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5680 {
5681 struct bnx2 *bp = netdev_priv(dev);
5682
5683 bnx2_netif_stop(bp);
5684
5685 bp->vlgrp = vlgrp;
5686 bnx2_set_rx_mode(dev);
5687
5688 bnx2_netif_start(bp);
5689 }
5690 #endif
5691
5692 /* Called with netif_tx_lock.
5693 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5694 * netif_wake_queue().
5695 */
5696 static int
5697 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5698 {
5699 struct bnx2 *bp = netdev_priv(dev);
5700 dma_addr_t mapping;
5701 struct tx_bd *txbd;
5702 struct sw_bd *tx_buf;
5703 u32 len, vlan_tag_flags, last_frag, mss;
5704 u16 prod, ring_prod;
5705 int i;
5706 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5707
5708 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5709 (skb_shinfo(skb)->nr_frags + 1))) {
5710 netif_stop_queue(dev);
5711 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5712 dev->name);
5713
5714 return NETDEV_TX_BUSY;
5715 }
5716 len = skb_headlen(skb);
5717 prod = bp->tx_prod;
5718 ring_prod = TX_RING_IDX(prod);
5719
5720 vlan_tag_flags = 0;
5721 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5722 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5723 }
5724
5725 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5726 vlan_tag_flags |=
5727 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5728 }
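	/* TSO: hand the MSS and header layout to the chip so it can segment
	 * the packet.  IPv6 needs the TCP header offset encoded in the BD
	 * flags; IPv4 needs the pseudo-header checksum seeded here. */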
5729 if ((mss = skb_shinfo(skb)->gso_size)) {
5730 u32 tcp_opt_len, ip_tcp_len;
5731 struct iphdr *iph;
5732
5733 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5734
5735 tcp_opt_len = tcp_optlen(skb);
5736
5737 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5738 u32 tcp_off = skb_transport_offset(skb) -
5739 sizeof(struct ipv6hdr) - ETH_HLEN;
5740
5741 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5742 TX_BD_FLAGS_SW_FLAGS;
5743 if (likely(tcp_off == 0))
5744 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5745 else {
5746 tcp_off >>= 3;
5747 vlan_tag_flags |= ((tcp_off & 0x3) <<
5748 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5749 ((tcp_off & 0x10) <<
5750 TX_BD_FLAGS_TCP6_OFF4_SHL);
5751 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5752 }
5753 } else {
5754 if (skb_header_cloned(skb) &&
5755 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5756 dev_kfree_skb(skb);
5757 return NETDEV_TX_OK;
5758 }
5759
5760 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5761
5762 iph = ip_hdr(skb);
5763 iph->check = 0;
5764 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5765 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5766 iph->daddr, 0,
5767 IPPROTO_TCP,
5768 0);
5769 if (tcp_opt_len || (iph->ihl > 5)) {
5770 vlan_tag_flags |= ((iph->ihl - 5) +
5771 (tcp_opt_len >> 2)) << 8;
5772 }
5773 }
5774 } else
5775 mss = 0;
5776
5777 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5778
5779 tx_buf = &bp->tx_buf_ring[ring_prod];
5780 tx_buf->skb = skb;
5781 pci_unmap_addr_set(tx_buf, mapping, mapping);
5782
5783 txbd = &bp->tx_desc_ring[ring_prod];
5784
5785 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5786 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5787 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5788 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5789
5790 last_frag = skb_shinfo(skb)->nr_frags;
5791
5792 for (i = 0; i < last_frag; i++) {
5793 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5794
5795 prod = NEXT_TX_BD(prod);
5796 ring_prod = TX_RING_IDX(prod);
5797 txbd = &bp->tx_desc_ring[ring_prod];
5798
5799 len = frag->size;
5800 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5801 len, PCI_DMA_TODEVICE);
5802 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5803 mapping, mapping);
5804
5805 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5806 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5807 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5808 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5809
5810 }
5811 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5812
5813 prod = NEXT_TX_BD(prod);
5814 bp->tx_prod_bseq += skb->len;
5815
5816 REG_WR16(bp, bp->tx_bidx_addr, prod);
5817 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5818
5819 mmiowb();
5820
5821 bp->tx_prod = prod;
5822 dev->trans_start = jiffies;
5823
5824 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5825 netif_stop_queue(dev);
5826 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5827 netif_wake_queue(dev);
5828 }
5829
5830 return NETDEV_TX_OK;
5831 }
5832
5833 /* Called with rtnl_lock */
5834 static int
5835 bnx2_close(struct net_device *dev)
5836 {
5837 struct bnx2 *bp = netdev_priv(dev);
5838 u32 reset_code;
5839
5840 /* Calling flush_scheduled_work() may deadlock because
5841 * linkwatch_event() may be on the workqueue and it will try to get
5842 * the rtnl_lock which we are holding.
5843 */
5844 while (bp->in_reset_task)
5845 msleep(1);
5846
5847 bnx2_disable_int_sync(bp);
5848 bnx2_napi_disable(bp);
5849 del_timer_sync(&bp->timer);
5850 if (bp->flags & BNX2_FLAG_NO_WOL)
5851 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5852 else if (bp->wol)
5853 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5854 else
5855 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5856 bnx2_reset_chip(bp, reset_code);
5857 bnx2_free_irq(bp);
5858 bnx2_free_skbs(bp);
5859 bnx2_free_mem(bp);
5860 bp->link_up = 0;
5861 netif_carrier_off(bp->dev);
5862 bnx2_set_power_state(bp, PCI_D3hot);
5863 return 0;
5864 }
5865
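/* Hardware statistics counters are kept as 64-bit hi/lo pairs.  On 64-bit
 * hosts both halves are combined; on 32-bit hosts only the low word is
 * reported, since struct net_device_stats uses unsigned long. */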
5866 #define GET_NET_STATS64(ctr) \
5867 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5868 (unsigned long) (ctr##_lo)
5869
5870 #define GET_NET_STATS32(ctr) \
5871 (ctr##_lo)
5872
5873 #if (BITS_PER_LONG == 64)
5874 #define GET_NET_STATS GET_NET_STATS64
5875 #else
5876 #define GET_NET_STATS GET_NET_STATS32
5877 #endif
5878
5879 static struct net_device_stats *
5880 bnx2_get_stats(struct net_device *dev)
5881 {
5882 struct bnx2 *bp = netdev_priv(dev);
5883 struct statistics_block *stats_blk = bp->stats_blk;
5884 struct net_device_stats *net_stats = &bp->net_stats;
5885
5886 if (bp->stats_blk == NULL) {
5887 return net_stats;
5888 }
5889 net_stats->rx_packets =
5890 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5891 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5892 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5893
5894 net_stats->tx_packets =
5895 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5896 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5897 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5898
5899 net_stats->rx_bytes =
5900 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5901
5902 net_stats->tx_bytes =
5903 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5904
5905 net_stats->multicast =
5906 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5907
5908 net_stats->collisions =
5909 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5910
5911 net_stats->rx_length_errors =
5912 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5913 stats_blk->stat_EtherStatsOverrsizePkts);
5914
5915 net_stats->rx_over_errors =
5916 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5917
5918 net_stats->rx_frame_errors =
5919 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5920
5921 net_stats->rx_crc_errors =
5922 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5923
5924 net_stats->rx_errors = net_stats->rx_length_errors +
5925 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5926 net_stats->rx_crc_errors;
5927
5928 net_stats->tx_aborted_errors =
5929 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5930 stats_blk->stat_Dot3StatsLateCollisions);
5931
5932 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5933 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5934 net_stats->tx_carrier_errors = 0;
5935 else {
5936 net_stats->tx_carrier_errors =
5937 (unsigned long)
5938 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5939 }
5940
5941 net_stats->tx_errors =
5942 (unsigned long)
5943 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5944 +
5945 net_stats->tx_aborted_errors +
5946 net_stats->tx_carrier_errors;
5947
5948 net_stats->rx_missed_errors =
5949 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5950 stats_blk->stat_FwRxDrop);
5951
5952 return net_stats;
5953 }
5954
5955 /* All ethtool functions are called with rtnl_lock held */
5956
5957 static int
5958 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5959 {
5960 struct bnx2 *bp = netdev_priv(dev);
5961 int support_serdes = 0, support_copper = 0;
5962
5963 cmd->supported = SUPPORTED_Autoneg;
5964 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5965 support_serdes = 1;
5966 support_copper = 1;
5967 } else if (bp->phy_port == PORT_FIBRE)
5968 support_serdes = 1;
5969 else
5970 support_copper = 1;
5971
5972 if (support_serdes) {
5973 cmd->supported |= SUPPORTED_1000baseT_Full |
5974 SUPPORTED_FIBRE;
5975 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
5976 cmd->supported |= SUPPORTED_2500baseX_Full;
5977
5978 }
5979 if (support_copper) {
5980 cmd->supported |= SUPPORTED_10baseT_Half |
5981 SUPPORTED_10baseT_Full |
5982 SUPPORTED_100baseT_Half |
5983 SUPPORTED_100baseT_Full |
5984 SUPPORTED_1000baseT_Full |
5985 SUPPORTED_TP;
5986
5987 }
5988
5989 spin_lock_bh(&bp->phy_lock);
5990 cmd->port = bp->phy_port;
5991 cmd->advertising = bp->advertising;
5992
5993 if (bp->autoneg & AUTONEG_SPEED) {
5994 cmd->autoneg = AUTONEG_ENABLE;
5995 }
5996 else {
5997 cmd->autoneg = AUTONEG_DISABLE;
5998 }
5999
6000 if (netif_carrier_ok(dev)) {
6001 cmd->speed = bp->line_speed;
6002 cmd->duplex = bp->duplex;
6003 }
6004 else {
6005 cmd->speed = -1;
6006 cmd->duplex = -1;
6007 }
6008 spin_unlock_bh(&bp->phy_lock);
6009
6010 cmd->transceiver = XCVR_INTERNAL;
6011 cmd->phy_address = bp->phy_addr;
6012
6013 return 0;
6014 }
6015
6016 static int
6017 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6018 {
6019 struct bnx2 *bp = netdev_priv(dev);
6020 u8 autoneg = bp->autoneg;
6021 u8 req_duplex = bp->req_duplex;
6022 u16 req_line_speed = bp->req_line_speed;
6023 u32 advertising = bp->advertising;
6024 int err = -EINVAL;
6025
6026 spin_lock_bh(&bp->phy_lock);
6027
6028 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6029 goto err_out_unlock;
6030
6031 if (cmd->port != bp->phy_port &&
6032 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6033 goto err_out_unlock;
6034
6035 if (cmd->autoneg == AUTONEG_ENABLE) {
6036 autoneg |= AUTONEG_SPEED;
6037
6038 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6039
6040 /* allow advertising 1 speed */
6041 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6042 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6043 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6044 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6045
6046 if (cmd->port == PORT_FIBRE)
6047 goto err_out_unlock;
6048
6049 advertising = cmd->advertising;
6050
6051 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6052 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6053 (cmd->port == PORT_TP))
6054 goto err_out_unlock;
6055 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6056 advertising = cmd->advertising;
6057 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6058 goto err_out_unlock;
6059 else {
6060 if (cmd->port == PORT_FIBRE)
6061 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6062 else
6063 advertising = ETHTOOL_ALL_COPPER_SPEED;
6064 }
6065 advertising |= ADVERTISED_Autoneg;
6066 }
6067 else {
6068 if (cmd->port == PORT_FIBRE) {
6069 if ((cmd->speed != SPEED_1000 &&
6070 cmd->speed != SPEED_2500) ||
6071 (cmd->duplex != DUPLEX_FULL))
6072 goto err_out_unlock;
6073
6074 if (cmd->speed == SPEED_2500 &&
6075 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6076 goto err_out_unlock;
6077 }
6078 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6079 goto err_out_unlock;
6080
6081 autoneg &= ~AUTONEG_SPEED;
6082 req_line_speed = cmd->speed;
6083 req_duplex = cmd->duplex;
6084 advertising = 0;
6085 }
6086
6087 bp->autoneg = autoneg;
6088 bp->advertising = advertising;
6089 bp->req_line_speed = req_line_speed;
6090 bp->req_duplex = req_duplex;
6091
6092 err = bnx2_setup_phy(bp, cmd->port);
6093
6094 err_out_unlock:
6095 spin_unlock_bh(&bp->phy_lock);
6096
6097 return err;
6098 }
6099
6100 static void
6101 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6102 {
6103 struct bnx2 *bp = netdev_priv(dev);
6104
6105 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6106 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6107 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6108 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6109 }
6110
6111 #define BNX2_REGDUMP_LEN (32 * 1024)
6112
6113 static int
6114 bnx2_get_regs_len(struct net_device *dev)
6115 {
6116 return BNX2_REGDUMP_LEN;
6117 }
6118
6119 static void
6120 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6121 {
6122 u32 *p = _p, i, offset;
6123 u8 *orig_p = _p;
6124 struct bnx2 *bp = netdev_priv(dev);
6125 	static const u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6126 0x0800, 0x0880, 0x0c00, 0x0c10,
6127 0x0c30, 0x0d08, 0x1000, 0x101c,
6128 0x1040, 0x1048, 0x1080, 0x10a4,
6129 0x1400, 0x1490, 0x1498, 0x14f0,
6130 0x1500, 0x155c, 0x1580, 0x15dc,
6131 0x1600, 0x1658, 0x1680, 0x16d8,
6132 0x1800, 0x1820, 0x1840, 0x1854,
6133 0x1880, 0x1894, 0x1900, 0x1984,
6134 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6135 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6136 0x2000, 0x2030, 0x23c0, 0x2400,
6137 0x2800, 0x2820, 0x2830, 0x2850,
6138 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6139 0x3c00, 0x3c94, 0x4000, 0x4010,
6140 0x4080, 0x4090, 0x43c0, 0x4458,
6141 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6142 0x4fc0, 0x5010, 0x53c0, 0x5444,
6143 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6144 0x5fc0, 0x6000, 0x6400, 0x6428,
6145 0x6800, 0x6848, 0x684c, 0x6860,
6146 0x6888, 0x6910, 0x8000 };
6147
6148 regs->version = 0;
6149
6150 memset(p, 0, BNX2_REGDUMP_LEN);
6151
6152 if (!netif_running(bp->dev))
6153 return;
6154
6155 i = 0;
6156 offset = reg_boundaries[0];
6157 p += offset;
6158 while (offset < BNX2_REGDUMP_LEN) {
6159 *p++ = REG_RD(bp, offset);
6160 offset += 4;
6161 if (offset == reg_boundaries[i + 1]) {
6162 offset = reg_boundaries[i + 2];
6163 p = (u32 *) (orig_p + offset);
6164 i += 2;
6165 }
6166 }
6167 }
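
/* The dump loop above walks reg_boundaries[] as (start, end) pairs:
 * words in [start, end) are read one at a time, and when a pair's end
 * is reached both the chip offset and the output pointer jump to the
 * next pair's start, leaving the hole zero-filled by the earlier
 * memset.  A standalone sketch of the same walk (hypothetical names,
 * not part of this driver):
 *
 *	static void dump_ranges(const u32 *bounds, u32 limit, u32 *out,
 *				u32 (*rd)(u32 off))
 *	{
 *		u32 i = 0, off = bounds[0];
 *
 *		while (off < limit) {
 *			out[off / 4] = rd(off);
 *			off += 4;
 *			if (off == bounds[i + 1]) {
 *				off = bounds[i + 2];
 *				i += 2;
 *			}
 *		}
 *	}
 */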
6168
6169 static void
6170 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6171 {
6172 struct bnx2 *bp = netdev_priv(dev);
6173
6174 if (bp->flags & BNX2_FLAG_NO_WOL) {
6175 wol->supported = 0;
6176 wol->wolopts = 0;
6177 }
6178 else {
6179 wol->supported = WAKE_MAGIC;
6180 if (bp->wol)
6181 wol->wolopts = WAKE_MAGIC;
6182 else
6183 wol->wolopts = 0;
6184 }
6185 memset(&wol->sopass, 0, sizeof(wol->sopass));
6186 }
6187
6188 static int
6189 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6190 {
6191 struct bnx2 *bp = netdev_priv(dev);
6192
6193 if (wol->wolopts & ~WAKE_MAGIC)
6194 return -EINVAL;
6195
6196 if (wol->wolopts & WAKE_MAGIC) {
6197 if (bp->flags & BNX2_FLAG_NO_WOL)
6198 return -EINVAL;
6199
6200 bp->wol = 1;
6201 }
6202 else {
6203 bp->wol = 0;
6204 }
6205 return 0;
6206 }
6207
6208 static int
6209 bnx2_nway_reset(struct net_device *dev)
6210 {
6211 struct bnx2 *bp = netdev_priv(dev);
6212 u32 bmcr;
6213
6214 if (!(bp->autoneg & AUTONEG_SPEED)) {
6215 return -EINVAL;
6216 }
6217
6218 spin_lock_bh(&bp->phy_lock);
6219
6220 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6221 int rc;
6222
6223 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6224 spin_unlock_bh(&bp->phy_lock);
6225 return rc;
6226 }
6227
6228 	/* Force a link-down event that the link partner can see */
6229 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6230 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6231 spin_unlock_bh(&bp->phy_lock);
6232
6233 msleep(20);
6234
6235 spin_lock_bh(&bp->phy_lock);
6236
6237 bp->current_interval = SERDES_AN_TIMEOUT;
6238 bp->serdes_an_pending = 1;
6239 mod_timer(&bp->timer, jiffies + bp->current_interval);
6240 }
6241
6242 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6243 bmcr &= ~BMCR_LOOPBACK;
6244 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6245
6246 spin_unlock_bh(&bp->phy_lock);
6247
6248 return 0;
6249 }
6250
6251 static int
6252 bnx2_get_eeprom_len(struct net_device *dev)
6253 {
6254 struct bnx2 *bp = netdev_priv(dev);
6255
6256 if (bp->flash_info == NULL)
6257 return 0;
6258
6259 return (int) bp->flash_size;
6260 }
6261
6262 static int
6263 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6264 u8 *eebuf)
6265 {
6266 struct bnx2 *bp = netdev_priv(dev);
6267 int rc;
6268
6269 /* parameters already validated in ethtool_get_eeprom */
6270
6271 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6272
6273 return rc;
6274 }
6275
6276 static int
6277 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6278 u8 *eebuf)
6279 {
6280 struct bnx2 *bp = netdev_priv(dev);
6281 int rc;
6282
6283 /* parameters already validated in ethtool_set_eeprom */
6284
6285 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6286
6287 return rc;
6288 }
6289
6290 static int
6291 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6292 {
6293 struct bnx2 *bp = netdev_priv(dev);
6294
6295 memset(coal, 0, sizeof(struct ethtool_coalesce));
6296
6297 coal->rx_coalesce_usecs = bp->rx_ticks;
6298 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6299 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6300 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6301
6302 coal->tx_coalesce_usecs = bp->tx_ticks;
6303 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6304 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6305 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6306
6307 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6308
6309 return 0;
6310 }
6311
6312 static int
6313 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6314 {
6315 struct bnx2 *bp = netdev_priv(dev);
6316
6317 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6318 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6319
6320 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6321 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6322
6323 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6324 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6325
6326 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6327 if (bp->rx_quick_cons_trip_int > 0xff)
6328 bp->rx_quick_cons_trip_int = 0xff;
6329
6330 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6331 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6332
6333 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6334 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6335
6336 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6337 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6338
6339 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6340 	if (bp->tx_quick_cons_trip_int > 0xff)
6341 		bp->tx_quick_cons_trip_int = 0xff;
6342
6343 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6344 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6345 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6346 bp->stats_ticks = USEC_PER_SEC;
6347 }
6348 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6349 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6350 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6351
6352 if (netif_running(bp->dev)) {
6353 bnx2_netif_stop(bp);
6354 bnx2_init_nic(bp);
6355 bnx2_netif_start(bp);
6356 }
6357
6358 return 0;
6359 }
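
/* Every field above is clamped to the width of its host-coalescing
 * register: 10 bits (0x3ff) for tick counts, 8 bits (0xff) for frame
 * counts.  The repeated pattern could be factored with min_t(); a
 * sketch using a hypothetical helper name, not part of this driver:
 *
 *	static u16 bnx2_coal_clamp(u32 val, u32 hw_max)
 *	{
 *		return (u16) min_t(u32, val, hw_max);
 *	}
 *
 *	bp->rx_ticks = bnx2_coal_clamp(coal->rx_coalesce_usecs, 0x3ff);
 */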
6360
6361 static void
6362 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6363 {
6364 struct bnx2 *bp = netdev_priv(dev);
6365
6366 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6367 ering->rx_mini_max_pending = 0;
6368 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6369
6370 ering->rx_pending = bp->rx_ring_size;
6371 ering->rx_mini_pending = 0;
6372 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6373
6374 ering->tx_max_pending = MAX_TX_DESC_CNT;
6375 ering->tx_pending = bp->tx_ring_size;
6376 }
6377
6378 static int
6379 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6380 {
6381 if (netif_running(bp->dev)) {
6382 bnx2_netif_stop(bp);
6383 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6384 bnx2_free_skbs(bp);
6385 bnx2_free_mem(bp);
6386 }
6387
6388 bnx2_set_rx_ring_size(bp, rx);
6389 bp->tx_ring_size = tx;
6390
6391 if (netif_running(bp->dev)) {
6392 int rc;
6393
6394 rc = bnx2_alloc_mem(bp);
6395 if (rc)
6396 return rc;
6397 bnx2_init_nic(bp);
6398 bnx2_netif_start(bp);
6399 }
6400 return 0;
6401 }
6402
6403 static int
6404 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6405 {
6406 struct bnx2 *bp = netdev_priv(dev);
6407 int rc;
6408
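	/* A maximally fragmented skb consumes up to MAX_SKB_FRAGS + 1 tx
	 * descriptors (one per page fragment plus one for the linear
	 * part), so a tx ring no larger than MAX_SKB_FRAGS could never
	 * send such a packet and is rejected below. */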
6409 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6410 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6411 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6412
6413 return -EINVAL;
6414 }
6415 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6416 return rc;
6417 }
6418
6419 static void
6420 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6421 {
6422 struct bnx2 *bp = netdev_priv(dev);
6423
6424 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6425 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6426 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6427 }
6428
6429 static int
6430 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6431 {
6432 struct bnx2 *bp = netdev_priv(dev);
6433
6434 bp->req_flow_ctrl = 0;
6435 if (epause->rx_pause)
6436 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6437 if (epause->tx_pause)
6438 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6439
6440 if (epause->autoneg) {
6441 bp->autoneg |= AUTONEG_FLOW_CTRL;
6442 }
6443 else {
6444 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6445 }
6446
6447 spin_lock_bh(&bp->phy_lock);
6448
6449 bnx2_setup_phy(bp, bp->phy_port);
6450
6451 spin_unlock_bh(&bp->phy_lock);
6452
6453 return 0;
6454 }
6455
6456 static u32
6457 bnx2_get_rx_csum(struct net_device *dev)
6458 {
6459 struct bnx2 *bp = netdev_priv(dev);
6460
6461 return bp->rx_csum;
6462 }
6463
6464 static int
6465 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6466 {
6467 struct bnx2 *bp = netdev_priv(dev);
6468
6469 bp->rx_csum = data;
6470 return 0;
6471 }
6472
6473 static int
6474 bnx2_set_tso(struct net_device *dev, u32 data)
6475 {
6476 struct bnx2 *bp = netdev_priv(dev);
6477
6478 if (data) {
6479 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6480 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6481 dev->features |= NETIF_F_TSO6;
6482 } else
6483 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6484 NETIF_F_TSO_ECN);
6485 return 0;
6486 }
6487
6488 #define BNX2_NUM_STATS 46
6489
6490 static struct {
6491 char string[ETH_GSTRING_LEN];
6492 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6493 { "rx_bytes" },
6494 { "rx_error_bytes" },
6495 { "tx_bytes" },
6496 { "tx_error_bytes" },
6497 { "rx_ucast_packets" },
6498 { "rx_mcast_packets" },
6499 { "rx_bcast_packets" },
6500 { "tx_ucast_packets" },
6501 { "tx_mcast_packets" },
6502 { "tx_bcast_packets" },
6503 { "tx_mac_errors" },
6504 { "tx_carrier_errors" },
6505 { "rx_crc_errors" },
6506 { "rx_align_errors" },
6507 { "tx_single_collisions" },
6508 { "tx_multi_collisions" },
6509 { "tx_deferred" },
6510 { "tx_excess_collisions" },
6511 { "tx_late_collisions" },
6512 { "tx_total_collisions" },
6513 { "rx_fragments" },
6514 { "rx_jabbers" },
6515 { "rx_undersize_packets" },
6516 { "rx_oversize_packets" },
6517 { "rx_64_byte_packets" },
6518 { "rx_65_to_127_byte_packets" },
6519 { "rx_128_to_255_byte_packets" },
6520 { "rx_256_to_511_byte_packets" },
6521 { "rx_512_to_1023_byte_packets" },
6522 { "rx_1024_to_1522_byte_packets" },
6523 { "rx_1523_to_9022_byte_packets" },
6524 { "tx_64_byte_packets" },
6525 { "tx_65_to_127_byte_packets" },
6526 { "tx_128_to_255_byte_packets" },
6527 { "tx_256_to_511_byte_packets" },
6528 { "tx_512_to_1023_byte_packets" },
6529 { "tx_1024_to_1522_byte_packets" },
6530 { "tx_1523_to_9022_byte_packets" },
6531 { "rx_xon_frames" },
6532 { "rx_xoff_frames" },
6533 { "tx_xon_frames" },
6534 { "tx_xoff_frames" },
6535 { "rx_mac_ctrl_frames" },
6536 { "rx_filtered_packets" },
6537 { "rx_discards" },
6538 { "rx_fw_discards" },
6539 };
6540
6541 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
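/* Example: STATS_OFFSET32(stat_IfHCInOctets_hi) is the index of that
 * field in the (u32 *) view of the statistics block that
 * bnx2_get_ethtool_stats() uses below. */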
6542
6543 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6544 STATS_OFFSET32(stat_IfHCInOctets_hi),
6545 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6546 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6547 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6548 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6549 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6550 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6551 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6552 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6553 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6554 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6555 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6556 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6557 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6558 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6559 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6560 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6561 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6562 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6563 STATS_OFFSET32(stat_EtherStatsCollisions),
6564 STATS_OFFSET32(stat_EtherStatsFragments),
6565 STATS_OFFSET32(stat_EtherStatsJabbers),
6566 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6567 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6568 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6569 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6570 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6571 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6572 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6573 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6574 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6575 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6576 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6577 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6578 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6579 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6580 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6581 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6582 STATS_OFFSET32(stat_XonPauseFramesReceived),
6583 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6584 STATS_OFFSET32(stat_OutXonSent),
6585 STATS_OFFSET32(stat_OutXoffSent),
6586 STATS_OFFSET32(stat_MacControlFramesReceived),
6587 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6588 STATS_OFFSET32(stat_IfInMBUFDiscards),
6589 STATS_OFFSET32(stat_FwRxDrop),
6590 };
6591
6592 /* Entry widths in the length arrays below: 8 = 64-bit counter,
6593  * 4 = 32-bit counter, 0 = skipped.  stat_IfHCInBadOctets and
6594  * stat_Dot3StatsCarrierSenseErrors are skipped because of errata. */
6595 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6596 8,0,8,8,8,8,8,8,8,8,
6597 4,0,4,4,4,4,4,4,4,4,
6598 4,4,4,4,4,4,4,4,4,4,
6599 4,4,4,4,4,4,4,4,4,4,
6600 4,4,4,4,4,4,
6601 };
6602
6603 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6604 8,0,8,8,8,8,8,8,8,8,
6605 4,4,4,4,4,4,4,4,4,4,
6606 4,4,4,4,4,4,4,4,4,4,
6607 4,4,4,4,4,4,4,4,4,4,
6608 4,4,4,4,4,4,
6609 };
6610
6611 #define BNX2_NUM_TESTS 6
6612
6613 static struct {
6614 char string[ETH_GSTRING_LEN];
6615 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6616 { "register_test (offline)" },
6617 { "memory_test (offline)" },
6618 { "loopback_test (offline)" },
6619 { "nvram_test (online)" },
6620 { "interrupt_test (online)" },
6621 { "link_test (online)" },
6622 };
6623
6624 static int
6625 bnx2_get_sset_count(struct net_device *dev, int sset)
6626 {
6627 switch (sset) {
6628 case ETH_SS_TEST:
6629 return BNX2_NUM_TESTS;
6630 case ETH_SS_STATS:
6631 return BNX2_NUM_STATS;
6632 default:
6633 return -EOPNOTSUPP;
6634 }
6635 }
6636
6637 static void
6638 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6639 {
6640 struct bnx2 *bp = netdev_priv(dev);
6641
6642 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6643 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6644 int i;
6645
6646 bnx2_netif_stop(bp);
6647 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6648 bnx2_free_skbs(bp);
6649
6650 if (bnx2_test_registers(bp) != 0) {
6651 buf[0] = 1;
6652 etest->flags |= ETH_TEST_FL_FAILED;
6653 }
6654 if (bnx2_test_memory(bp) != 0) {
6655 buf[1] = 1;
6656 etest->flags |= ETH_TEST_FL_FAILED;
6657 }
6658 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6659 etest->flags |= ETH_TEST_FL_FAILED;
6660
6661 if (!netif_running(bp->dev)) {
6662 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6663 }
6664 else {
6665 bnx2_init_nic(bp);
6666 bnx2_netif_start(bp);
6667 }
6668
6669 /* wait for link up */
6670 for (i = 0; i < 7; i++) {
6671 if (bp->link_up)
6672 break;
6673 msleep_interruptible(1000);
6674 }
6675 }
6676
6677 if (bnx2_test_nvram(bp) != 0) {
6678 buf[3] = 1;
6679 etest->flags |= ETH_TEST_FL_FAILED;
6680 }
6681 if (bnx2_test_intr(bp) != 0) {
6682 buf[4] = 1;
6683 etest->flags |= ETH_TEST_FL_FAILED;
6684 }
6685
6686 if (bnx2_test_link(bp) != 0) {
6687 buf[5] = 1;
6688 etest->flags |= ETH_TEST_FL_FAILED;
6690 	}
6691 }
6692
6693 static void
6694 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6695 {
6696 switch (stringset) {
6697 case ETH_SS_STATS:
6698 memcpy(buf, bnx2_stats_str_arr,
6699 sizeof(bnx2_stats_str_arr));
6700 break;
6701 case ETH_SS_TEST:
6702 memcpy(buf, bnx2_tests_str_arr,
6703 sizeof(bnx2_tests_str_arr));
6704 break;
6705 }
6706 }
6707
6708 static void
6709 bnx2_get_ethtool_stats(struct net_device *dev,
6710 struct ethtool_stats *stats, u64 *buf)
6711 {
6712 struct bnx2 *bp = netdev_priv(dev);
6713 int i;
6714 u32 *hw_stats = (u32 *) bp->stats_blk;
6715 u8 *stats_len_arr = NULL;
6716
6717 if (hw_stats == NULL) {
6718 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6719 return;
6720 }
6721
6722 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6723 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6724 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6725 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6726 stats_len_arr = bnx2_5706_stats_len_arr;
6727 else
6728 stats_len_arr = bnx2_5708_stats_len_arr;
6729
6730 for (i = 0; i < BNX2_NUM_STATS; i++) {
6731 if (stats_len_arr[i] == 0) {
6732 /* skip this counter */
6733 buf[i] = 0;
6734 continue;
6735 }
6736 if (stats_len_arr[i] == 4) {
6737 /* 4-byte counter */
6738 buf[i] = (u64)
6739 *(hw_stats + bnx2_stats_offset_arr[i]);
6740 continue;
6741 }
6742 /* 8-byte counter */
6743 buf[i] = (((u64) *(hw_stats +
6744 bnx2_stats_offset_arr[i])) << 32) +
6745 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6746 }
6747 }
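
/* The 8-byte case works because 64-bit hardware counters are stored
 * as two consecutive 32-bit words, high word first (hence the _hi
 * names in the offset table).  In isolation (sketch, hypothetical
 * name):
 *
 *	static u64 stats_read64(const u32 *blk, unsigned long idx)
 *	{
 *		return ((u64) blk[idx] << 32) | blk[idx + 1];
 *	}
 */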
6748
6749 static int
6750 bnx2_phys_id(struct net_device *dev, u32 data)
6751 {
6752 struct bnx2 *bp = netdev_priv(dev);
6753 int i;
6754 u32 save;
6755
6756 if (data == 0)
6757 data = 2;
6758
6759 save = REG_RD(bp, BNX2_MISC_CFG);
6760 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6761
6762 for (i = 0; i < (data * 2); i++) {
6763 if ((i % 2) == 0) {
6764 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6765 }
6766 else {
6767 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6768 BNX2_EMAC_LED_1000MB_OVERRIDE |
6769 BNX2_EMAC_LED_100MB_OVERRIDE |
6770 BNX2_EMAC_LED_10MB_OVERRIDE |
6771 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6772 BNX2_EMAC_LED_TRAFFIC);
6773 }
6774 msleep_interruptible(500);
6775 if (signal_pending(current))
6776 break;
6777 }
6778 REG_WR(bp, BNX2_EMAC_LED, 0);
6779 REG_WR(bp, BNX2_MISC_CFG, save);
6780 return 0;
6781 }
6782
6783 static int
6784 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6785 {
6786 struct bnx2 *bp = netdev_priv(dev);
6787
6788 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6789 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6790 else
6791 return (ethtool_op_set_tx_csum(dev, data));
6792 }
6793
6794 static const struct ethtool_ops bnx2_ethtool_ops = {
6795 .get_settings = bnx2_get_settings,
6796 .set_settings = bnx2_set_settings,
6797 .get_drvinfo = bnx2_get_drvinfo,
6798 .get_regs_len = bnx2_get_regs_len,
6799 .get_regs = bnx2_get_regs,
6800 .get_wol = bnx2_get_wol,
6801 .set_wol = bnx2_set_wol,
6802 .nway_reset = bnx2_nway_reset,
6803 .get_link = ethtool_op_get_link,
6804 .get_eeprom_len = bnx2_get_eeprom_len,
6805 .get_eeprom = bnx2_get_eeprom,
6806 .set_eeprom = bnx2_set_eeprom,
6807 .get_coalesce = bnx2_get_coalesce,
6808 .set_coalesce = bnx2_set_coalesce,
6809 .get_ringparam = bnx2_get_ringparam,
6810 .set_ringparam = bnx2_set_ringparam,
6811 .get_pauseparam = bnx2_get_pauseparam,
6812 .set_pauseparam = bnx2_set_pauseparam,
6813 .get_rx_csum = bnx2_get_rx_csum,
6814 .set_rx_csum = bnx2_set_rx_csum,
6815 .set_tx_csum = bnx2_set_tx_csum,
6816 .set_sg = ethtool_op_set_sg,
6817 .set_tso = bnx2_set_tso,
6818 .self_test = bnx2_self_test,
6819 .get_strings = bnx2_get_strings,
6820 .phys_id = bnx2_phys_id,
6821 .get_ethtool_stats = bnx2_get_ethtool_stats,
6822 .get_sset_count = bnx2_get_sset_count,
6823 };
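
/* These ops back the ethtool(8) utility; for example:
 *	ethtool -i ethX        -> bnx2_get_drvinfo
 *	ethtool -S ethX        -> bnx2_get_strings / bnx2_get_ethtool_stats
 *	ethtool -c / -C ethX   -> bnx2_get_coalesce / bnx2_set_coalesce
 *	ethtool -t ethX        -> bnx2_self_test
 *	ethtool -p ethX        -> bnx2_phys_id
 */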
6824
6825 /* Called with rtnl_lock */
6826 static int
6827 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6828 {
6829 struct mii_ioctl_data *data = if_mii(ifr);
6830 struct bnx2 *bp = netdev_priv(dev);
6831 int err;
6832
6833 switch(cmd) {
6834 case SIOCGMIIPHY:
6835 data->phy_id = bp->phy_addr;
6836
6837 /* fallthru */
6838 case SIOCGMIIREG: {
6839 u32 mii_regval;
6840
6841 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6842 return -EOPNOTSUPP;
6843
6844 if (!netif_running(dev))
6845 return -EAGAIN;
6846
6847 spin_lock_bh(&bp->phy_lock);
6848 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6849 spin_unlock_bh(&bp->phy_lock);
6850
6851 data->val_out = mii_regval;
6852
6853 return err;
6854 }
6855
6856 case SIOCSMIIREG:
6857 if (!capable(CAP_NET_ADMIN))
6858 return -EPERM;
6859
6860 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6861 return -EOPNOTSUPP;
6862
6863 if (!netif_running(dev))
6864 return -EAGAIN;
6865
6866 spin_lock_bh(&bp->phy_lock);
6867 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6868 spin_unlock_bh(&bp->phy_lock);
6869
6870 return err;
6871
6872 default:
6873 /* do nothing */
6874 break;
6875 }
6876 return -EOPNOTSUPP;
6877 }
6878
6879 /* Called with rtnl_lock */
6880 static int
6881 bnx2_change_mac_addr(struct net_device *dev, void *p)
6882 {
6883 struct sockaddr *addr = p;
6884 struct bnx2 *bp = netdev_priv(dev);
6885
6886 if (!is_valid_ether_addr(addr->sa_data))
6887 return -EINVAL;
6888
6889 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6890 if (netif_running(dev))
6891 bnx2_set_mac_addr(bp);
6892
6893 return 0;
6894 }
6895
6896 /* Called with rtnl_lock */
6897 static int
6898 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6899 {
6900 struct bnx2 *bp = netdev_priv(dev);
6901
6902 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6903 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6904 return -EINVAL;
6905
6906 dev->mtu = new_mtu;
6907 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6908 }
6909
6910 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6911 static void
6912 poll_bnx2(struct net_device *dev)
6913 {
6914 struct bnx2 *bp = netdev_priv(dev);
6915
6916 disable_irq(bp->pdev->irq);
6917 bnx2_interrupt(bp->pdev->irq, dev);
6918 enable_irq(bp->pdev->irq);
6919 }
6920 #endif
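
/* dev->poll_controller is used by netpoll clients such as netconsole;
 * it may be called with interrupts disabled, so the interrupt handler
 * is driven by hand inside the disable_irq()/enable_irq() bracket
 * above. */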
6921
6922 static void __devinit
6923 bnx2_get_5709_media(struct bnx2 *bp)
6924 {
6925 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6926 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6927 u32 strap;
6928
6929 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6930 return;
6931 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6932 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6933 return;
6934 }
6935
6936 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6937 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6938 else
6939 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6940
6941 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6942 switch (strap) {
6943 case 0x4:
6944 case 0x5:
6945 case 0x6:
6946 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6947 return;
6948 }
6949 } else {
6950 switch (strap) {
6951 case 0x1:
6952 case 0x2:
6953 case 0x4:
6954 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6955 return;
6956 }
6957 }
6958 }
6959
6960 static void __devinit
6961 bnx2_get_pci_speed(struct bnx2 *bp)
6962 {
6963 u32 reg;
6964
6965 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6966 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6967 u32 clkreg;
6968
6969 bp->flags |= BNX2_FLAG_PCIX;
6970
6971 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6972
6973 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6974 switch (clkreg) {
6975 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6976 bp->bus_speed_mhz = 133;
6977 break;
6978
6979 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6980 bp->bus_speed_mhz = 100;
6981 break;
6982
6983 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6984 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6985 bp->bus_speed_mhz = 66;
6986 break;
6987
6988 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6989 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6990 bp->bus_speed_mhz = 50;
6991 break;
6992
6993 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6994 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6995 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6996 bp->bus_speed_mhz = 33;
6997 break;
6998 }
6999 }
7000 else {
7001 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7002 bp->bus_speed_mhz = 66;
7003 else
7004 bp->bus_speed_mhz = 33;
7005 }
7006
7007 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7008 bp->flags |= BNX2_FLAG_PCI_32BIT;
7009
7010 }
7011
7012 static int __devinit
7013 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7014 {
7015 struct bnx2 *bp;
7016 unsigned long mem_len;
7017 int rc, i, j;
7018 u32 reg;
7019 u64 dma_mask, persist_dma_mask;
7020
7021 SET_NETDEV_DEV(dev, &pdev->dev);
7022 bp = netdev_priv(dev);
7023
7024 bp->flags = 0;
7025 bp->phy_flags = 0;
7026
7027 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7028 rc = pci_enable_device(pdev);
7029 if (rc) {
7030 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7031 goto err_out;
7032 }
7033
7034 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7035 dev_err(&pdev->dev,
7036 "Cannot find PCI device base address, aborting.\n");
7037 rc = -ENODEV;
7038 goto err_out_disable;
7039 }
7040
7041 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7042 if (rc) {
7043 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7044 goto err_out_disable;
7045 }
7046
7047 pci_set_master(pdev);
7048
7049 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7050 if (bp->pm_cap == 0) {
7051 dev_err(&pdev->dev,
7052 "Cannot find power management capability, aborting.\n");
7053 rc = -EIO;
7054 goto err_out_release;
7055 }
7056
7057 bp->dev = dev;
7058 bp->pdev = pdev;
7059
7060 spin_lock_init(&bp->phy_lock);
7061 spin_lock_init(&bp->indirect_lock);
7062 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7063
7064 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7065 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7066 dev->mem_end = dev->mem_start + mem_len;
7067 dev->irq = pdev->irq;
7068
7069 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7070
7071 if (!bp->regview) {
7072 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7073 rc = -ENOMEM;
7074 goto err_out_release;
7075 }
7076
7077 	/* Configure byte swap and enable writes to the reg_window registers.
7078 	 * Rely on the CPU to do target byte swapping on big endian systems;
7079 	 * the chip's target access swapping will not swap all accesses.
7080 	 */
7081 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7082 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7083 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7084
7085 bnx2_set_power_state(bp, PCI_D0);
7086
7087 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7088
7089 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7090 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7091 dev_err(&pdev->dev,
7092 "Cannot find PCIE capability, aborting.\n");
7093 rc = -EIO;
7094 goto err_out_unmap;
7095 }
7096 bp->flags |= BNX2_FLAG_PCIE;
7097 if (CHIP_REV(bp) == CHIP_REV_Ax)
7098 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7099 } else {
7100 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7101 if (bp->pcix_cap == 0) {
7102 dev_err(&pdev->dev,
7103 "Cannot find PCIX capability, aborting.\n");
7104 rc = -EIO;
7105 goto err_out_unmap;
7106 }
7107 }
7108
7109 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7110 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7111 bp->flags |= BNX2_FLAG_MSIX_CAP;
7112 }
7113
7114 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7115 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7116 bp->flags |= BNX2_FLAG_MSI_CAP;
7117 }
7118
7119 /* 5708 cannot support DMA addresses > 40-bit. */
7120 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7121 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7122 else
7123 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7124
7125 /* Configure DMA attributes. */
7126 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7127 dev->features |= NETIF_F_HIGHDMA;
7128 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7129 if (rc) {
7130 dev_err(&pdev->dev,
7131 "pci_set_consistent_dma_mask failed, aborting.\n");
7132 goto err_out_unmap;
7133 }
7134 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7135 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7136 goto err_out_unmap;
7137 }
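
/* Because the 5708 tops out at 40-bit DMA, the probe tries the widest
 * supported mask first and falls back to 32-bit.  Later kernels can
 * express the same intent in one call per mask (sketch only; this API
 * postdates the code above):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
 *		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */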
7138
7139 if (!(bp->flags & BNX2_FLAG_PCIE))
7140 bnx2_get_pci_speed(bp);
7141
7142 /* 5706A0 may falsely detect SERR and PERR. */
7143 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7144 reg = REG_RD(bp, PCI_COMMAND);
7145 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7146 REG_WR(bp, PCI_COMMAND, reg);
7147 }
7148 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7149 !(bp->flags & BNX2_FLAG_PCIX)) {
7150
7151 		dev_err(&pdev->dev,
7152 			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;	/* error path must not return success */
7153 		goto err_out_unmap;
7154 }
7155
7156 bnx2_init_nvram(bp);
7157
7158 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
7159
7160 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7161 BNX2_SHM_HDR_SIGNATURE_SIG) {
7162 u32 off = PCI_FUNC(pdev->devfn) << 2;
7163
7164 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
7165 } else
7166 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7167
7168 /* Get the permanent MAC address. First we need to make sure the
7169 * firmware is actually running.
7170 */
7171 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
7172
7173 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7174 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7175 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7176 rc = -ENODEV;
7177 goto err_out_unmap;
7178 }
7179
7180 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7181 for (i = 0, j = 0; i < 3; i++) {
7182 u8 num, k, skip0;
7183
7184 num = (u8) (reg >> (24 - (i * 8)));
7185 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7186 if (num >= k || !skip0 || k == 1) {
7187 bp->fw_version[j++] = (num / k) + '0';
7188 skip0 = 0;
7189 }
7190 }
7191 if (i != 2)
7192 bp->fw_version[j++] = '.';
7193 }
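
/* The loop above formats the bootcode revision "x.y.z" from the top
 * three bytes of the register, suppressing leading zeros by hand and
 * keeping the write index j so the management-firmware version can be
 * appended later.  A sketch of the equivalent one-liner (assuming the
 * fw_version field is large enough; j would then be recovered with
 * strlen):
 *
 *	snprintf(bp->fw_version, sizeof(bp->fw_version), "%u.%u.%u",
 *		 (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
 */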
7194 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7195 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7196 bp->wol = 1;
7197
7198 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7199 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7200
7201 for (i = 0; i < 30; i++) {
7202 reg = REG_RD_IND(bp, bp->shmem_base +
7203 BNX2_BC_STATE_CONDITION);
7204 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7205 break;
7206 msleep(10);
7207 }
7208 }
7209 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7210 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7211 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7212 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7213 int i;
7214 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7215
7216 bp->fw_version[j++] = ' ';
7217 for (i = 0; i < 3; i++) {
7218 reg = REG_RD_IND(bp, addr + i * 4);
7219 reg = swab32(reg);
7220 memcpy(&bp->fw_version[j], &reg, 4);
7221 j += 4;
7222 }
7223 }
7224
7225 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7226 bp->mac_addr[0] = (u8) (reg >> 8);
7227 bp->mac_addr[1] = (u8) reg;
7228
7229 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7230 bp->mac_addr[2] = (u8) (reg >> 24);
7231 bp->mac_addr[3] = (u8) (reg >> 16);
7232 bp->mac_addr[4] = (u8) (reg >> 8);
7233 bp->mac_addr[5] = (u8) reg;
7234
7235 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7236
7237 bp->tx_ring_size = MAX_TX_DESC_CNT;
7238 bnx2_set_rx_ring_size(bp, 255);
7239
7240 bp->rx_csum = 1;
7241
7242 bp->tx_quick_cons_trip_int = 20;
7243 bp->tx_quick_cons_trip = 20;
7244 bp->tx_ticks_int = 80;
7245 bp->tx_ticks = 80;
7246
7247 bp->rx_quick_cons_trip_int = 6;
7248 bp->rx_quick_cons_trip = 6;
7249 bp->rx_ticks_int = 18;
7250 bp->rx_ticks = 18;
7251
7252 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7253
7254 bp->timer_interval = HZ;
7255 bp->current_interval = HZ;
7256
7257 bp->phy_addr = 1;
7258
7259 /* Disable WOL support if we are running on a SERDES chip. */
7260 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7261 bnx2_get_5709_media(bp);
7262 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7263 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7264
7265 bp->phy_port = PORT_TP;
7266 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7267 bp->phy_port = PORT_FIBRE;
7268 reg = REG_RD_IND(bp, bp->shmem_base +
7269 BNX2_SHARED_HW_CFG_CONFIG);
7270 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7271 bp->flags |= BNX2_FLAG_NO_WOL;
7272 bp->wol = 0;
7273 }
7274 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7275 bp->phy_addr = 2;
7276 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7277 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7278 }
7279 bnx2_init_remote_phy(bp);
7280
7281 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7282 CHIP_NUM(bp) == CHIP_NUM_5708)
7283 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7284 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7285 (CHIP_REV(bp) == CHIP_REV_Ax ||
7286 CHIP_REV(bp) == CHIP_REV_Bx))
7287 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7288
7289 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7290 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7291 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7292 bp->flags |= BNX2_FLAG_NO_WOL;
7293 bp->wol = 0;
7294 }
7295
7296 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7297 bp->tx_quick_cons_trip_int =
7298 bp->tx_quick_cons_trip;
7299 bp->tx_ticks_int = bp->tx_ticks;
7300 bp->rx_quick_cons_trip_int =
7301 bp->rx_quick_cons_trip;
7302 bp->rx_ticks_int = bp->rx_ticks;
7303 bp->comp_prod_trip_int = bp->comp_prod_trip;
7304 bp->com_ticks_int = bp->com_ticks;
7305 bp->cmd_ticks_int = bp->cmd_ticks;
7306 }
7307
7308 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7309 *
7310 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7311 * with byte enables disabled on the unused 32-bit word. This is legal
7312 * but causes problems on the AMD 8132 which will eventually stop
7313 * responding after a while.
7314 *
7315 * AMD believes this incompatibility is unique to the 5706, and
7316 * prefers to locally disable MSI rather than globally disabling it.
7317 */
7318 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7319 struct pci_dev *amd_8132 = NULL;
7320
7321 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7322 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7323 amd_8132))) {
7324
7325 if (amd_8132->revision >= 0x10 &&
7326 amd_8132->revision <= 0x13) {
7327 disable_msi = 1;
7328 pci_dev_put(amd_8132);
7329 break;
7330 }
7331 }
7332 }
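
/* pci_get_device() drops the reference on the device passed in and
 * takes one on the device returned, so the loop above walks every
 * 8132 bridge without leaking references; only the early break must
 * drop the final reference itself, hence the pci_dev_put().  The
 * general idiom (placeholder names):
 *
 *	struct pci_dev *pdev2 = NULL;
 *
 *	while ((pdev2 = pci_get_device(vendor, device, pdev2))) {
 *		if (found_what_we_want(pdev2)) {
 *			pci_dev_put(pdev2);
 *			break;
 *		}
 *	}
 */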
7333
7334 bnx2_set_default_link(bp);
7335 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7336
7337 init_timer(&bp->timer);
7338 bp->timer.expires = RUN_AT(bp->timer_interval);
7339 bp->timer.data = (unsigned long) bp;
7340 bp->timer.function = bnx2_timer;
7341
7342 return 0;
7343
7344 err_out_unmap:
7345 if (bp->regview) {
7346 iounmap(bp->regview);
7347 bp->regview = NULL;
7348 }
7349
7350 err_out_release:
7351 pci_release_regions(pdev);
7352
7353 err_out_disable:
7354 pci_disable_device(pdev);
7355 pci_set_drvdata(pdev, NULL);
7356
7357 err_out:
7358 return rc;
7359 }
7360
7361 static char * __devinit
7362 bnx2_bus_string(struct bnx2 *bp, char *str)
7363 {
7364 char *s = str;
7365
7366 if (bp->flags & BNX2_FLAG_PCIE) {
7367 s += sprintf(s, "PCI Express");
7368 } else {
7369 s += sprintf(s, "PCI");
7370 if (bp->flags & BNX2_FLAG_PCIX)
7371 s += sprintf(s, "-X");
7372 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7373 s += sprintf(s, " 32-bit");
7374 else
7375 s += sprintf(s, " 64-bit");
7376 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7377 }
7378 return str;
7379 }
7380
7381 static void __devinit
7382 bnx2_init_napi(struct bnx2 *bp)
7383 {
7384 int i;
7385 struct bnx2_napi *bnapi;
7386
7387 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7388 bnapi = &bp->bnx2_napi[i];
7389 bnapi->bp = bp;
7390 }
7391 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7392 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7393 64);
7394 }
7395
7396 static int __devinit
7397 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7398 {
7399 static int version_printed = 0;
7400 struct net_device *dev = NULL;
7401 struct bnx2 *bp;
7402 int rc;
7403 char str[40];
7404 DECLARE_MAC_BUF(mac);
7405
7406 if (version_printed++ == 0)
7407 printk(KERN_INFO "%s", version);
7408
7409 	/* dev and its private area are zeroed by alloc_etherdev */
7410 dev = alloc_etherdev(sizeof(*bp));
7411
7412 if (!dev)
7413 return -ENOMEM;
7414
7415 rc = bnx2_init_board(pdev, dev);
7416 if (rc < 0) {
7417 free_netdev(dev);
7418 return rc;
7419 }
7420
7421 dev->open = bnx2_open;
7422 dev->hard_start_xmit = bnx2_start_xmit;
7423 dev->stop = bnx2_close;
7424 dev->get_stats = bnx2_get_stats;
7425 dev->set_multicast_list = bnx2_set_rx_mode;
7426 dev->do_ioctl = bnx2_ioctl;
7427 dev->set_mac_address = bnx2_change_mac_addr;
7428 dev->change_mtu = bnx2_change_mtu;
7429 dev->tx_timeout = bnx2_tx_timeout;
7430 dev->watchdog_timeo = TX_TIMEOUT;
7431 #ifdef BCM_VLAN
7432 dev->vlan_rx_register = bnx2_vlan_rx_register;
7433 #endif
7434 dev->ethtool_ops = &bnx2_ethtool_ops;
7435
7436 bp = netdev_priv(dev);
7437 bnx2_init_napi(bp);
7438
7439 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7440 dev->poll_controller = poll_bnx2;
7441 #endif
7442
7443 pci_set_drvdata(pdev, dev);
7444
7445 memcpy(dev->dev_addr, bp->mac_addr, 6);
7446 memcpy(dev->perm_addr, bp->mac_addr, 6);
7447 bp->name = board_info[ent->driver_data].name;
7448
7449 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7450 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7451 dev->features |= NETIF_F_IPV6_CSUM;
7452
7453 #ifdef BCM_VLAN
7454 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7455 #endif
7456 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7457 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7458 dev->features |= NETIF_F_TSO6;
7459
7460 if ((rc = register_netdev(dev))) {
7461 dev_err(&pdev->dev, "Cannot register net device\n");
7462 if (bp->regview)
7463 iounmap(bp->regview);
7464 pci_release_regions(pdev);
7465 pci_disable_device(pdev);
7466 pci_set_drvdata(pdev, NULL);
7467 free_netdev(dev);
7468 return rc;
7469 }
7470
7471 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7472 "IRQ %d, node addr %s\n",
7473 dev->name,
7474 bp->name,
7475 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7476 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7477 bnx2_bus_string(bp, str),
7478 dev->base_addr,
7479 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7480
7481 return 0;
7482 }
7483
7484 static void __devexit
7485 bnx2_remove_one(struct pci_dev *pdev)
7486 {
7487 struct net_device *dev = pci_get_drvdata(pdev);
7488 struct bnx2 *bp = netdev_priv(dev);
7489
7490 flush_scheduled_work();
7491
7492 unregister_netdev(dev);
7493
7494 if (bp->regview)
7495 iounmap(bp->regview);
7496
7497 free_netdev(dev);
7498 pci_release_regions(pdev);
7499 pci_disable_device(pdev);
7500 pci_set_drvdata(pdev, NULL);
7501 }
7502
7503 static int
7504 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7505 {
7506 struct net_device *dev = pci_get_drvdata(pdev);
7507 struct bnx2 *bp = netdev_priv(dev);
7508 u32 reset_code;
7509
7510 /* PCI register 4 needs to be saved whether netif_running() or not.
7511 * MSI address and data need to be saved if using MSI and
7512 * netif_running().
7513 */
7514 pci_save_state(pdev);
7515 if (!netif_running(dev))
7516 return 0;
7517
7518 flush_scheduled_work();
7519 bnx2_netif_stop(bp);
7520 netif_device_detach(dev);
7521 del_timer_sync(&bp->timer);
7522 if (bp->flags & BNX2_FLAG_NO_WOL)
7523 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7524 else if (bp->wol)
7525 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7526 else
7527 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7528 bnx2_reset_chip(bp, reset_code);
7529 bnx2_free_skbs(bp);
7530 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7531 return 0;
7532 }
7533
7534 static int
7535 bnx2_resume(struct pci_dev *pdev)
7536 {
7537 struct net_device *dev = pci_get_drvdata(pdev);
7538 struct bnx2 *bp = netdev_priv(dev);
7539
7540 pci_restore_state(pdev);
7541 if (!netif_running(dev))
7542 return 0;
7543
7544 bnx2_set_power_state(bp, PCI_D0);
7545 netif_device_attach(dev);
7546 bnx2_init_nic(bp);
7547 bnx2_netif_start(bp);
7548 return 0;
7549 }
7550
7551 static struct pci_driver bnx2_pci_driver = {
7552 .name = DRV_MODULE_NAME,
7553 .id_table = bnx2_pci_tbl,
7554 .probe = bnx2_init_one,
7555 .remove = __devexit_p(bnx2_remove_one),
7556 .suspend = bnx2_suspend,
7557 .resume = bnx2_resume,
7558 };
7559
7560 static int __init bnx2_init(void)
7561 {
7562 return pci_register_driver(&bnx2_pci_driver);
7563 }
7564
7565 static void __exit bnx2_cleanup(void)
7566 {
7567 pci_unregister_driver(&bnx2_pci_driver);
7568 }
7569
7570 module_init(bnx2_init);
7571 module_exit(bnx2_cleanup);
7572