/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#include <linux/log2.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.9.0"
#define DRV_MODULE_RELDATE	"Dec 16, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

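/* Supported NVRAM parts.  Each entry pairs the strapping value that
 * identifies the part at probe time with the setup values for the NVM
 * interface and the part's geometry (page size, address mask, total
 * size).  The field meanings are given by struct flash_spec in bnx2.h.
 */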
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries; one of them
	 * needs to be skipped.
	 */
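	/* Worked example of the wrap-around arithmetic below: with
	 * tx_prod == 5 (after wrapping) and tx_cons == 0xfffb, the
	 * subtraction yields 0xffff000a; masking to 16 bits leaves 10
	 * descriptors in flight, so tx_ring_size - 10 are available.
	 */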
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

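/* Registers outside the directly mapped window are accessed indirectly
 * through the PCICFG register window: write the target offset to
 * BNX2_PCICFG_REG_WINDOW_ADDRESS, then transfer the data through
 * BNX2_PCICFG_REG_WINDOW.  indirect_lock serializes the two-step
 * sequence against concurrent users.
 */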
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

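	/* Compose the MDIO command word: PHY address in bits 25:21,
	 * register number in bits 20:16, the READ opcode, and
	 * START_BUSY to start the transaction.  The loop below polls
	 * for the hardware to clear START_BUSY on completion.
	 */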
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

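		/* Unmask in two steps: the first write latches the
		 * current status index while the vector stays masked;
		 * the second write unmasks it, so no status update
		 * between the two writes can be missed.
		 */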
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		if (rxr->rx_buf_ring)
			vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		if (rxr->rx_pg_ring)
			vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
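	/* Resulting layout of the combined allocation:
	 *
	 *   status_blk_mapping -> status block(s); with MSIX_CAP, one
	 *                         per hardware vector, each padded to
	 *                         BNX2_SBLK_MSIX_ALIGN_SIZE
	 *   + status_blk_size  -> statistics block
	 */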
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
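	/* Resolution summary (local / remote advertisement bits):
	 *   Cap + Cap         -> symmetric pause (TX | RX)
	 *   Cap,Asym + Asym   -> RX only (honor received pause)
	 *   Asym + Cap,Asym   -> TX only (send pause frames)
	 *   anything else     -> no flow control
	 */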
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

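	/* Program the EMAC TX length parameters.  1000 Mbps half
	 * duplex gets the larger value below, presumably the extended
	 * slot time that half-duplex gigabit requires.
	 */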
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autonegotiation
		 * involves exchanging base pages plus 3 next pages
		 * and normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is set up after the link has
		 * gone down and up again.  In some cases, the link will
		 * not go down, so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
2147 bnx2_write_phy(bp, 0x15, 0x14e2);
2148 bnx2_write_phy(bp, 0x18, 0x0400);
2149 }
2150
2151 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2152 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2153 MII_BNX2_DSP_EXPAND_REG | 0x8);
2154 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2155 val &= ~(1 << 8);
2156 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2157 }
2158
2159 if (bp->dev->mtu > 1500) {
2160 /* Set extended packet length bit */
2161 bnx2_write_phy(bp, 0x18, 0x7);
2162 bnx2_read_phy(bp, 0x18, &val);
2163 bnx2_write_phy(bp, 0x18, val | 0x4000);
2164
2165 bnx2_read_phy(bp, 0x10, &val);
2166 bnx2_write_phy(bp, 0x10, val | 0x1);
2167 }
2168 else {
2169 bnx2_write_phy(bp, 0x18, 0x7);
2170 bnx2_read_phy(bp, 0x18, &val);
2171 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2172
2173 bnx2_read_phy(bp, 0x10, &val);
2174 bnx2_write_phy(bp, 0x10, val & ~0x1);
2175 }
2176
2177 /* ethernet@wirespeed */
2178 bnx2_write_phy(bp, 0x18, 0x7007);
2179 bnx2_read_phy(bp, 0x18, &val);
2180 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2181 return 0;
2182 }
2183
2184
2185 static int
2186 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2187 __releases(&bp->phy_lock)
2188 __acquires(&bp->phy_lock)
2189 {
2190 u32 val;
2191 int rc = 0;
2192
2193 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2194 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2195
2196 bp->mii_bmcr = MII_BMCR;
2197 bp->mii_bmsr = MII_BMSR;
2198 bp->mii_bmsr1 = MII_BMSR;
2199 bp->mii_adv = MII_ADVERTISE;
2200 bp->mii_lpa = MII_LPA;
2201
2202 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2203
2204 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2205 goto setup_phy;
2206
2207 bnx2_read_phy(bp, MII_PHYSID1, &val);
2208 bp->phy_id = val << 16;
2209 bnx2_read_phy(bp, MII_PHYSID2, &val);
2210 bp->phy_id |= val & 0xffff;
2211
2212 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2213 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2214 rc = bnx2_init_5706s_phy(bp, reset_phy);
2215 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2216 rc = bnx2_init_5708s_phy(bp, reset_phy);
2217 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2218 rc = bnx2_init_5709s_phy(bp, reset_phy);
2219 }
2220 else {
2221 rc = bnx2_init_copper_phy(bp, reset_phy);
2222 }
2223
2224 setup_phy:
2225 if (!rc)
2226 rc = bnx2_setup_phy(bp, bp->phy_port);
2227
2228 return rc;
2229 }
2230
2231 static int
2232 bnx2_set_mac_loopback(struct bnx2 *bp)
2233 {
2234 u32 mac_mode;
2235
2236 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2237 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2238 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2239 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2240 bp->link_up = 1;
2241 return 0;
2242 }
2243
2244 static int bnx2_test_link(struct bnx2 *);
2245
2246 static int
2247 bnx2_set_phy_loopback(struct bnx2 *bp)
2248 {
2249 u32 mac_mode;
2250 int rc, i;
2251
2252 spin_lock_bh(&bp->phy_lock);
2253 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2254 BMCR_SPEED1000);
2255 spin_unlock_bh(&bp->phy_lock);
2256 if (rc)
2257 return rc;
2258
2259 for (i = 0; i < 10; i++) {
2260 if (bnx2_test_link(bp) == 0)
2261 break;
2262 msleep(100);
2263 }
2264
2265 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2266 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2267 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2268 BNX2_EMAC_MODE_25G_MODE);
2269
2270 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2271 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2272 bp->link_up = 1;
2273 return 0;
2274 }
2275
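/* Post a message to the bootcode through the shared memory mailbox
 * and, if requested, poll for the matching sequence number in the
 * firmware ack mailbox.  On timeout, the firmware is notified and
 * -EBUSY is returned.
 */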
2276 static int
2277 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2278 {
2279 int i;
2280 u32 val;
2281
2282 bp->fw_wr_seq++;
2283 msg_data |= bp->fw_wr_seq;
2284
2285 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2286
2287 if (!ack)
2288 return 0;
2289
2290 /* wait for an acknowledgement. */
2291 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2292 msleep(10);
2293
2294 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2295
2296 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2297 break;
2298 }
2299 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2300 return 0;
2301
2302 /* If we timed out, inform the firmware that this is the case. */
2303 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2304 if (!silent)
2305 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2306 "%x\n", msg_data);
2307
2308 msg_data &= ~BNX2_DRV_MSG_CODE;
2309 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2310
2311 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2312
2313 return -EBUSY;
2314 }
2315
2316 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2317 return -EIO;
2318
2319 return 0;
2320 }
2321
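/* The 5709 keeps its context memory in host DRAM.  Zero each context
 * page and register its DMA address in the chip's host page table,
 * polling for each write request to complete.
 */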
2322 static int
2323 bnx2_init_5709_context(struct bnx2 *bp)
2324 {
2325 int i, ret = 0;
2326 u32 val;
2327
2328 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2329 val |= (BCM_PAGE_BITS - 8) << 16;
2330 REG_WR(bp, BNX2_CTX_COMMAND, val);
2331 for (i = 0; i < 10; i++) {
2332 val = REG_RD(bp, BNX2_CTX_COMMAND);
2333 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2334 break;
2335 udelay(2);
2336 }
2337 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2338 return -EBUSY;
2339
2340 for (i = 0; i < bp->ctx_pages; i++) {
2341 int j;
2342
2343 if (bp->ctx_blk[i])
2344 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2345 else
2346 return -ENOMEM;
2347
2348 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2349 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2350 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2351 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2352 (u64) bp->ctx_blk_mapping[i] >> 32);
2353 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2354 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2355 for (j = 0; j < 10; j++) {
2356
2357 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2358 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2359 break;
2360 udelay(5);
2361 }
2362 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2363 ret = -EBUSY;
2364 break;
2365 }
2366 }
2367 return ret;
2368 }
2369
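/* Zero the on-chip context memory for all 96 contexts.  On the
 * 5706 A0, some virtual CIDs map to different physical CIDs, so the
 * physical address is computed separately there.
 */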
2370 static void
2371 bnx2_init_context(struct bnx2 *bp)
2372 {
2373 u32 vcid;
2374
2375 vcid = 96;
2376 while (vcid) {
2377 u32 vcid_addr, pcid_addr, offset;
2378 int i;
2379
2380 vcid--;
2381
2382 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2383 u32 new_vcid;
2384
2385 vcid_addr = GET_PCID_ADDR(vcid);
2386 if (vcid & 0x8) {
2387 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2388 }
2389 else {
2390 new_vcid = vcid;
2391 }
2392 pcid_addr = GET_PCID_ADDR(new_vcid);
2393 }
2394 else {
2395 vcid_addr = GET_CID_ADDR(vcid);
2396 pcid_addr = vcid_addr;
2397 }
2398
2399 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2400 vcid_addr += (i << PHY_CTX_SHIFT);
2401 pcid_addr += (i << PHY_CTX_SHIFT);
2402
2403 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2404 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2405
2406 /* Zero out the context. */
2407 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2408 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2409 }
2410 }
2411 }
2412
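/* Work around bad RX buffer memory: allocate every mbuf in the
 * firmware pool, remember the good ones (bit 9 of the address clear),
 * and free only those back.  The bad mbufs remain allocated and are
 * never handed out again.
 */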
2413 static int
2414 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2415 {
2416 u16 *good_mbuf;
2417 u32 good_mbuf_cnt;
2418 u32 val;
2419
2420 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2421 if (good_mbuf == NULL) {
2422 printk(KERN_ERR PFX "Failed to allocate memory in "
2423 "bnx2_alloc_bad_rbuf\n");
2424 return -ENOMEM;
2425 }
2426
2427 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2428 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2429
2430 good_mbuf_cnt = 0;
2431
2432 /* Allocate a bunch of mbufs and save the good ones in an array. */
2433 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2434 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2435 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2436 BNX2_RBUF_COMMAND_ALLOC_REQ);
2437
2438 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2439
2440 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2441
2442 /* The addresses with Bit 9 set are bad memory blocks. */
2443 if (!(val & (1 << 9))) {
2444 good_mbuf[good_mbuf_cnt] = (u16) val;
2445 good_mbuf_cnt++;
2446 }
2447
2448 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2449 }
2450
2451 	/* Free the good ones back to the mbuf pool, thus discarding
2452 	 * all the bad ones. */
2453 while (good_mbuf_cnt) {
2454 good_mbuf_cnt--;
2455
2456 val = good_mbuf[good_mbuf_cnt];
2457 val = (val << 9) | val | 1;
2458
2459 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2460 }
2461 kfree(good_mbuf);
2462 return 0;
2463 }
2464
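/* Program one of the EMAC perfect-match MAC address slots. */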
2465 static void
2466 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2467 {
2468 u32 val;
2469
2470 val = (mac_addr[0] << 8) | mac_addr[1];
2471
2472 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2473
2474 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2475 (mac_addr[4] << 8) | mac_addr[5];
2476
2477 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2478 }
2479
2480 static inline int
2481 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2482 {
2483 dma_addr_t mapping;
2484 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2485 struct rx_bd *rxbd =
2486 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2487 struct page *page = alloc_page(GFP_ATOMIC);
2488
2489 if (!page)
2490 return -ENOMEM;
2491 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2492 PCI_DMA_FROMDEVICE);
2493 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2494 __free_page(page);
2495 return -EIO;
2496 }
2497
2498 rx_pg->page = page;
2499 pci_unmap_addr_set(rx_pg, mapping, mapping);
2500 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2501 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2502 return 0;
2503 }
2504
2505 static void
2506 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2507 {
2508 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2509 struct page *page = rx_pg->page;
2510
2511 if (!page)
2512 return;
2513
2514 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2515 PCI_DMA_FROMDEVICE);
2516
2517 __free_page(page);
2518 rx_pg->page = NULL;
2519 }
2520
2521 static inline int
2522 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2523 {
2524 struct sk_buff *skb;
2525 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2526 dma_addr_t mapping;
2527 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2528 unsigned long align;
2529
2530 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2531 if (skb == NULL) {
2532 return -ENOMEM;
2533 }
2534
2535 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2536 skb_reserve(skb, BNX2_RX_ALIGN - align);
2537
2538 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2539 PCI_DMA_FROMDEVICE);
2540 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2541 dev_kfree_skb(skb);
2542 return -EIO;
2543 }
2544
2545 rx_buf->skb = skb;
2546 pci_unmap_addr_set(rx_buf, mapping, mapping);
2547
2548 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2549 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2550
2551 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2552
2553 return 0;
2554 }
2555
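/* Check whether a PHY attention event is pending by comparing the
 * attention bit with its ack bit in the status block, and acknowledge
 * the change through the PCICFG set/clear command registers.  Returns
 * 1 if the event state changed.
 */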
2556 static int
2557 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2558 {
2559 struct status_block *sblk = bnapi->status_blk.msi;
2560 u32 new_link_state, old_link_state;
2561 int is_set = 1;
2562
2563 new_link_state = sblk->status_attn_bits & event;
2564 old_link_state = sblk->status_attn_bits_ack & event;
2565 if (new_link_state != old_link_state) {
2566 if (new_link_state)
2567 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2568 else
2569 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2570 } else
2571 is_set = 0;
2572
2573 return is_set;
2574 }
2575
2576 static void
2577 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2578 {
2579 spin_lock(&bp->phy_lock);
2580
2581 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2582 bnx2_set_link(bp);
2583 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2584 bnx2_set_remote_link(bp);
2585
2586 spin_unlock(&bp->phy_lock);
2587
2588 }
2589
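/* The last BD in each ring page holds a pointer to the next page, so
 * a consumer index that lands on that slot is advanced past it.
 */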
2590 static inline u16
2591 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2592 {
2593 u16 cons;
2594
2595 	/* Tell the compiler that status block fields can change. */
2596 barrier();
2597 cons = *bnapi->hw_tx_cons_ptr;
2598 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2599 cons++;
2600 return cons;
2601 }
2602
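/* Reclaim TX buffers completed by the hardware: unmap and free each
 * skb up to the hardware consumer index, then wake the TX queue if it
 * was stopped and enough descriptors have been freed.
 */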
2603 static int
2604 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2605 {
2606 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2607 u16 hw_cons, sw_cons, sw_ring_cons;
2608 int tx_pkt = 0, index;
2609 struct netdev_queue *txq;
2610
2611 index = (bnapi - bp->bnx2_napi);
2612 txq = netdev_get_tx_queue(bp->dev, index);
2613
2614 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2615 sw_cons = txr->tx_cons;
2616
2617 while (sw_cons != hw_cons) {
2618 struct sw_tx_bd *tx_buf;
2619 struct sk_buff *skb;
2620 int i, last;
2621
2622 sw_ring_cons = TX_RING_IDX(sw_cons);
2623
2624 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2625 skb = tx_buf->skb;
2626
2627 /* partial BD completions possible with TSO packets */
2628 if (skb_is_gso(skb)) {
2629 u16 last_idx, last_ring_idx;
2630
2631 last_idx = sw_cons +
2632 skb_shinfo(skb)->nr_frags + 1;
2633 last_ring_idx = sw_ring_cons +
2634 skb_shinfo(skb)->nr_frags + 1;
2635 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2636 last_idx++;
2637 }
2638 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2639 break;
2640 }
2641 }
2642
2643 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
2644
2645 tx_buf->skb = NULL;
2646 last = skb_shinfo(skb)->nr_frags;
2647
2648 for (i = 0; i < last; i++) {
2649 sw_cons = NEXT_TX_BD(sw_cons);
2650 }
2651
2652 sw_cons = NEXT_TX_BD(sw_cons);
2653
2654 dev_kfree_skb(skb);
2655 tx_pkt++;
2656 if (tx_pkt == budget)
2657 break;
2658
2659 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2660 }
2661
2662 txr->hw_tx_cons = hw_cons;
2663 txr->tx_cons = sw_cons;
2664
2665 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2666 * before checking for netif_tx_queue_stopped(). Without the
2667 * memory barrier, there is a small possibility that bnx2_start_xmit()
2668 * will miss it and cause the queue to be stopped forever.
2669 */
2670 smp_mb();
2671
2672 if (unlikely(netif_tx_queue_stopped(txq)) &&
2673 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2674 __netif_tx_lock(txq, smp_processor_id());
2675 if ((netif_tx_queue_stopped(txq)) &&
2676 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2677 netif_tx_wake_queue(txq);
2678 __netif_tx_unlock(txq);
2679 }
2680
2681 return tx_pkt;
2682 }
2683
2684 static void
2685 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2686 struct sk_buff *skb, int count)
2687 {
2688 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2689 struct rx_bd *cons_bd, *prod_bd;
2690 int i;
2691 u16 hw_prod, prod;
2692 u16 cons = rxr->rx_pg_cons;
2693
2694 cons_rx_pg = &rxr->rx_pg_ring[cons];
2695
2696 /* The caller was unable to allocate a new page to replace the
2697 * last one in the frags array, so we need to recycle that page
2698 * and then free the skb.
2699 */
2700 if (skb) {
2701 struct page *page;
2702 struct skb_shared_info *shinfo;
2703
2704 shinfo = skb_shinfo(skb);
2705 shinfo->nr_frags--;
2706 page = shinfo->frags[shinfo->nr_frags].page;
2707 shinfo->frags[shinfo->nr_frags].page = NULL;
2708
2709 cons_rx_pg->page = page;
2710 dev_kfree_skb(skb);
2711 }
2712
2713 hw_prod = rxr->rx_pg_prod;
2714
2715 for (i = 0; i < count; i++) {
2716 prod = RX_PG_RING_IDX(hw_prod);
2717
2718 prod_rx_pg = &rxr->rx_pg_ring[prod];
2719 cons_rx_pg = &rxr->rx_pg_ring[cons];
2720 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2721 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2722
2723 if (prod != cons) {
2724 prod_rx_pg->page = cons_rx_pg->page;
2725 cons_rx_pg->page = NULL;
2726 pci_unmap_addr_set(prod_rx_pg, mapping,
2727 pci_unmap_addr(cons_rx_pg, mapping));
2728
2729 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2730 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2731
2732 }
2733 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2734 hw_prod = NEXT_RX_BD(hw_prod);
2735 }
2736 rxr->rx_pg_prod = hw_prod;
2737 rxr->rx_pg_cons = cons;
2738 }
2739
2740 static inline void
2741 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2742 struct sk_buff *skb, u16 cons, u16 prod)
2743 {
2744 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2745 struct rx_bd *cons_bd, *prod_bd;
2746
2747 cons_rx_buf = &rxr->rx_buf_ring[cons];
2748 prod_rx_buf = &rxr->rx_buf_ring[prod];
2749
2750 pci_dma_sync_single_for_device(bp->pdev,
2751 pci_unmap_addr(cons_rx_buf, mapping),
2752 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2753
2754 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2755
2756 prod_rx_buf->skb = skb;
2757
2758 if (cons == prod)
2759 return;
2760
2761 pci_unmap_addr_set(prod_rx_buf, mapping,
2762 pci_unmap_addr(cons_rx_buf, mapping));
2763
2764 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2765 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2766 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2767 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2768 }
2769
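/* Finish an RX packet: replace the consumed buffer, unmap the linear
 * data, and for split or jumbo packets attach the payload pages as
 * skb fragments, replenishing the page ring as we go.  On any
 * allocation failure the buffers are recycled and an error returned.
 */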
2770 static int
2771 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2772 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2773 u32 ring_idx)
2774 {
2775 int err;
2776 u16 prod = ring_idx & 0xffff;
2777
2778 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2779 if (unlikely(err)) {
2780 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2781 if (hdr_len) {
2782 unsigned int raw_len = len + 4;
2783 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2784
2785 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2786 }
2787 return err;
2788 }
2789
2790 skb_reserve(skb, BNX2_RX_OFFSET);
2791 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2792 PCI_DMA_FROMDEVICE);
2793
2794 if (hdr_len == 0) {
2795 skb_put(skb, len);
2796 return 0;
2797 } else {
2798 unsigned int i, frag_len, frag_size, pages;
2799 struct sw_pg *rx_pg;
2800 u16 pg_cons = rxr->rx_pg_cons;
2801 u16 pg_prod = rxr->rx_pg_prod;
2802
2803 frag_size = len + 4 - hdr_len;
2804 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2805 skb_put(skb, hdr_len);
2806
2807 for (i = 0; i < pages; i++) {
2808 dma_addr_t mapping_old;
2809
2810 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2811 if (unlikely(frag_len <= 4)) {
2812 unsigned int tail = 4 - frag_len;
2813
2814 rxr->rx_pg_cons = pg_cons;
2815 rxr->rx_pg_prod = pg_prod;
2816 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2817 pages - i);
2818 skb->len -= tail;
2819 if (i == 0) {
2820 skb->tail -= tail;
2821 } else {
2822 skb_frag_t *frag =
2823 &skb_shinfo(skb)->frags[i - 1];
2824 frag->size -= tail;
2825 skb->data_len -= tail;
2826 skb->truesize -= tail;
2827 }
2828 return 0;
2829 }
2830 rx_pg = &rxr->rx_pg_ring[pg_cons];
2831
2832 /* Don't unmap yet. If we're unable to allocate a new
2833 * page, we need to recycle the page and the DMA addr.
2834 */
2835 mapping_old = pci_unmap_addr(rx_pg, mapping);
2836 if (i == pages - 1)
2837 frag_len -= 4;
2838
2839 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2840 rx_pg->page = NULL;
2841
2842 err = bnx2_alloc_rx_page(bp, rxr,
2843 RX_PG_RING_IDX(pg_prod));
2844 if (unlikely(err)) {
2845 rxr->rx_pg_cons = pg_cons;
2846 rxr->rx_pg_prod = pg_prod;
2847 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2848 pages - i);
2849 return err;
2850 }
2851
2852 pci_unmap_page(bp->pdev, mapping_old,
2853 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2854
2855 frag_size -= frag_len;
2856 skb->data_len += frag_len;
2857 skb->truesize += frag_len;
2858 skb->len += frag_len;
2859
2860 pg_prod = NEXT_RX_BD(pg_prod);
2861 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2862 }
2863 rxr->rx_pg_prod = pg_prod;
2864 rxr->rx_pg_cons = pg_cons;
2865 }
2866 return 0;
2867 }
2868
2869 static inline u16
2870 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2871 {
2872 u16 cons;
2873
2874 	/* Tell the compiler that status block fields can change. */
2875 barrier();
2876 cons = *bnapi->hw_rx_cons_ptr;
2877 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2878 cons++;
2879 return cons;
2880 }
2881
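/* RX completion loop: walk the ring up to the hardware consumer
 * index, drop frames with errors, copy small packets into fresh skbs,
 * handle VLAN tags and checksum offload results, and finally post the
 * new producer indexes back to the chip.
 */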
2882 static int
2883 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2884 {
2885 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2886 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2887 struct l2_fhdr *rx_hdr;
2888 int rx_pkt = 0, pg_ring_used = 0;
2889
2890 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2891 sw_cons = rxr->rx_cons;
2892 sw_prod = rxr->rx_prod;
2893
2894 /* Memory barrier necessary as speculative reads of the rx
2895 	 * buffer can be ahead of the index in the status block.
2896 */
2897 rmb();
2898 while (sw_cons != hw_cons) {
2899 unsigned int len, hdr_len;
2900 u32 status;
2901 struct sw_bd *rx_buf;
2902 struct sk_buff *skb;
2903 dma_addr_t dma_addr;
2904 u16 vtag = 0;
2905 int hw_vlan __maybe_unused = 0;
2906
2907 sw_ring_cons = RX_RING_IDX(sw_cons);
2908 sw_ring_prod = RX_RING_IDX(sw_prod);
2909
2910 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2911 skb = rx_buf->skb;
2912
2913 rx_buf->skb = NULL;
2914
2915 dma_addr = pci_unmap_addr(rx_buf, mapping);
2916
2917 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2918 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2919 PCI_DMA_FROMDEVICE);
2920
2921 rx_hdr = (struct l2_fhdr *) skb->data;
2922 len = rx_hdr->l2_fhdr_pkt_len;
2923
2924 if ((status = rx_hdr->l2_fhdr_status) &
2925 (L2_FHDR_ERRORS_BAD_CRC |
2926 L2_FHDR_ERRORS_PHY_DECODE |
2927 L2_FHDR_ERRORS_ALIGNMENT |
2928 L2_FHDR_ERRORS_TOO_SHORT |
2929 L2_FHDR_ERRORS_GIANT_FRAME)) {
2930
2931 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2932 sw_ring_prod);
2933 goto next_rx;
2934 }
2935 hdr_len = 0;
2936 if (status & L2_FHDR_STATUS_SPLIT) {
2937 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2938 pg_ring_used = 1;
2939 } else if (len > bp->rx_jumbo_thresh) {
2940 hdr_len = bp->rx_jumbo_thresh;
2941 pg_ring_used = 1;
2942 }
2943
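		/* The hardware-reported length includes the 4-byte frame
		 * CRC; strip it before handing the packet up.
		 */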
2944 len -= 4;
2945
2946 if (len <= bp->rx_copy_thresh) {
2947 struct sk_buff *new_skb;
2948
2949 new_skb = netdev_alloc_skb(bp->dev, len + 6);
2950 if (new_skb == NULL) {
2951 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2952 sw_ring_prod);
2953 goto next_rx;
2954 }
2955
2956 /* aligned copy */
2957 skb_copy_from_linear_data_offset(skb,
2958 BNX2_RX_OFFSET - 6,
2959 new_skb->data, len + 6);
2960 skb_reserve(new_skb, 6);
2961 skb_put(new_skb, len);
2962
2963 bnx2_reuse_rx_skb(bp, rxr, skb,
2964 sw_ring_cons, sw_ring_prod);
2965
2966 skb = new_skb;
2967 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2968 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2969 goto next_rx;
2970
2971 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2972 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2973 vtag = rx_hdr->l2_fhdr_vlan_tag;
2974 #ifdef BCM_VLAN
2975 if (bp->vlgrp)
2976 hw_vlan = 1;
2977 else
2978 #endif
2979 {
2980 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
2981 __skb_push(skb, 4);
2982
2983 memmove(ve, skb->data + 4, ETH_ALEN * 2);
2984 ve->h_vlan_proto = htons(ETH_P_8021Q);
2985 ve->h_vlan_TCI = htons(vtag);
2986 len += 4;
2987 }
2988 }
2989
2990 skb->protocol = eth_type_trans(skb, bp->dev);
2991
2992 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2993 		    (ntohs(skb->protocol) != ETH_P_8021Q)) {
2994
2995 dev_kfree_skb(skb);
2996 goto next_rx;
2997
2998 }
2999
3000 skb->ip_summed = CHECKSUM_NONE;
3001 if (bp->rx_csum &&
3002 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3003 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3004
3005 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3006 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3007 skb->ip_summed = CHECKSUM_UNNECESSARY;
3008 }
3009
3010 #ifdef BCM_VLAN
3011 if (hw_vlan)
3012 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3013 else
3014 #endif
3015 netif_receive_skb(skb);
3016
3017 rx_pkt++;
3018
3019 next_rx:
3020 sw_cons = NEXT_RX_BD(sw_cons);
3021 sw_prod = NEXT_RX_BD(sw_prod);
3022
3023 		if (rx_pkt == budget)
3024 break;
3025
3026 /* Refresh hw_cons to see if there is new work */
3027 if (sw_cons == hw_cons) {
3028 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3029 rmb();
3030 }
3031 }
3032 rxr->rx_cons = sw_cons;
3033 rxr->rx_prod = sw_prod;
3034
3035 if (pg_ring_used)
3036 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3037
3038 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3039
3040 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3041
3042 mmiowb();
3043
3044 return rx_pkt;
3045
3046 }
3047
3048 /* MSI ISR - The only difference between this and the INTx ISR
3049 * is that the MSI interrupt is always serviced.
3050 */
3051 static irqreturn_t
3052 bnx2_msi(int irq, void *dev_instance)
3053 {
3054 struct bnx2_napi *bnapi = dev_instance;
3055 struct bnx2 *bp = bnapi->bp;
3056
3057 prefetch(bnapi->status_blk.msi);
3058 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3059 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3060 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3061
3062 /* Return here if interrupt is disabled. */
3063 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3064 return IRQ_HANDLED;
3065
3066 napi_schedule(&bnapi->napi);
3067
3068 return IRQ_HANDLED;
3069 }
3070
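/* MSI one-shot ISR - the chip disarms the interrupt on assertion, so
 * no mask/ack write is needed before scheduling NAPI; the interrupt
 * is re-armed when NAPI completes.
 */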
3071 static irqreturn_t
3072 bnx2_msi_1shot(int irq, void *dev_instance)
3073 {
3074 struct bnx2_napi *bnapi = dev_instance;
3075 struct bnx2 *bp = bnapi->bp;
3076
3077 prefetch(bnapi->status_blk.msi);
3078
3079 /* Return here if interrupt is disabled. */
3080 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3081 return IRQ_HANDLED;
3082
3083 napi_schedule(&bnapi->napi);
3084
3085 return IRQ_HANDLED;
3086 }
3087
3088 static irqreturn_t
3089 bnx2_interrupt(int irq, void *dev_instance)
3090 {
3091 struct bnx2_napi *bnapi = dev_instance;
3092 struct bnx2 *bp = bnapi->bp;
3093 struct status_block *sblk = bnapi->status_blk.msi;
3094
3095 	/* When using INTx, it is possible for the interrupt to arrive
3096 	 * at the CPU before the status block write posted prior to the
3097 	 * interrupt has reached host memory. Reading a register will
3098 	 * flush the status block. When using MSI, the MSI message will
3099 	 * always complete after the status block write.
3100 */
3101 if ((sblk->status_idx == bnapi->last_status_idx) &&
3102 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3103 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3104 return IRQ_NONE;
3105
3106 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3107 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3108 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3109
3110 /* Read back to deassert IRQ immediately to avoid too many
3111 * spurious interrupts.
3112 */
3113 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3114
3115 /* Return here if interrupt is shared and is disabled. */
3116 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3117 return IRQ_HANDLED;
3118
3119 if (napi_schedule_prep(&bnapi->napi)) {
3120 bnapi->last_status_idx = sblk->status_idx;
3121 __napi_schedule(&bnapi->napi);
3122 }
3123
3124 return IRQ_HANDLED;
3125 }
3126
3127 static inline int
3128 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3129 {
3130 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3131 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3132
3133 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3134 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3135 return 1;
3136 return 0;
3137 }
3138
3139 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3140 STATUS_ATTN_BITS_TIMER_ABORT)
3141
3142 static inline int
3143 bnx2_has_work(struct bnx2_napi *bnapi)
3144 {
3145 struct status_block *sblk = bnapi->status_blk.msi;
3146
3147 if (bnx2_has_fast_work(bnapi))
3148 return 1;
3149
3150 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3151 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3152 return 1;
3153
3154 return 0;
3155 }
3156
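/* Work around occasionally lost MSIs: if work is pending but the
 * status index has not moved since the last idle check, pulse the MSI
 * enable bit and invoke the interrupt handler by hand.
 */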
3157 static void
3158 bnx2_chk_missed_msi(struct bnx2 *bp)
3159 {
3160 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3161 u32 msi_ctrl;
3162
3163 if (bnx2_has_work(bnapi)) {
3164 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3165 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3166 return;
3167
3168 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3169 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3170 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3171 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3172 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3173 }
3174 }
3175
3176 bp->idle_chk_status_idx = bnapi->last_status_idx;
3177 }
3178
3179 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3180 {
3181 struct status_block *sblk = bnapi->status_blk.msi;
3182 u32 status_attn_bits = sblk->status_attn_bits;
3183 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3184
3185 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3186 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3187
3188 bnx2_phy_int(bp, bnapi);
3189
3190 /* This is needed to take care of transient status
3191 * during link changes.
3192 */
3193 REG_WR(bp, BNX2_HC_COMMAND,
3194 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3195 REG_RD(bp, BNX2_HC_COMMAND);
3196 }
3197 }
3198
3199 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3200 int work_done, int budget)
3201 {
3202 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3203 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3204
3205 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3206 bnx2_tx_int(bp, bnapi, 0);
3207
3208 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3209 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3210
3211 return work_done;
3212 }
3213
3214 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3215 {
3216 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3217 struct bnx2 *bp = bnapi->bp;
3218 int work_done = 0;
3219 struct status_block_msix *sblk = bnapi->status_blk.msix;
3220
3221 while (1) {
3222 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3223 if (unlikely(work_done >= budget))
3224 break;
3225
3226 bnapi->last_status_idx = sblk->status_idx;
3227 /* status idx must be read before checking for more work. */
3228 rmb();
3229 if (likely(!bnx2_has_fast_work(bnapi))) {
3230
3231 napi_complete(napi);
3232 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3233 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3234 bnapi->last_status_idx);
3235 break;
3236 }
3237 }
3238 return work_done;
3239 }
3240
3241 static int bnx2_poll(struct napi_struct *napi, int budget)
3242 {
3243 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3244 struct bnx2 *bp = bnapi->bp;
3245 int work_done = 0;
3246 struct status_block *sblk = bnapi->status_blk.msi;
3247
3248 while (1) {
3249 bnx2_poll_link(bp, bnapi);
3250
3251 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3252
3253 /* bnapi->last_status_idx is used below to tell the hw how
3254 * much work has been processed, so we must read it before
3255 * checking for more work.
3256 */
3257 bnapi->last_status_idx = sblk->status_idx;
3258
3259 if (unlikely(work_done >= budget))
3260 break;
3261
3262 rmb();
3263 if (likely(!bnx2_has_work(bnapi))) {
3264 napi_complete(napi);
3265 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3266 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3267 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3268 bnapi->last_status_idx);
3269 break;
3270 }
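			/* For INTx, write the index with interrupts still
			 * masked, then again without MASK_INT to re-enable
			 * them.
			 */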
3271 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3272 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3273 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3274 bnapi->last_status_idx);
3275
3276 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3277 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3278 bnapi->last_status_idx);
3279 break;
3280 }
3281 }
3282
3283 return work_done;
3284 }
3285
3286 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3287 * from set_multicast.
3288 */
3289 static void
3290 bnx2_set_rx_mode(struct net_device *dev)
3291 {
3292 struct bnx2 *bp = netdev_priv(dev);
3293 u32 rx_mode, sort_mode;
3294 struct dev_addr_list *uc_ptr;
3295 int i;
3296
3297 if (!netif_running(dev))
3298 return;
3299
3300 spin_lock_bh(&bp->phy_lock);
3301
3302 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3303 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3304 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3305 #ifdef BCM_VLAN
3306 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3307 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3308 #else
3309 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3310 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3311 #endif
3312 if (dev->flags & IFF_PROMISC) {
3313 /* Promiscuous mode. */
3314 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3315 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3316 BNX2_RPM_SORT_USER0_PROM_VLAN;
3317 }
3318 else if (dev->flags & IFF_ALLMULTI) {
3319 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3320 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3321 0xffffffff);
3322 }
3323 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3324 }
3325 else {
3326 /* Accept one or more multicast(s). */
3327 struct dev_mc_list *mclist;
3328 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3329 u32 regidx;
3330 u32 bit;
3331 u32 crc;
3332
3333 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3334
3335 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3336 i++, mclist = mclist->next) {
3337
3338 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3339 bit = crc & 0xff;
3340 regidx = (bit & 0xe0) >> 5;
3341 bit &= 0x1f;
3342 mc_filter[regidx] |= (1 << bit);
3343 }
3344
3345 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3346 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3347 mc_filter[i]);
3348 }
3349
3350 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3351 }
3352
3353 uc_ptr = NULL;
3354 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3355 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3356 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3357 BNX2_RPM_SORT_USER0_PROM_VLAN;
3358 } else if (!(dev->flags & IFF_PROMISC)) {
3359 uc_ptr = dev->uc_list;
3360
3361 		/* Add all entries to the match filter list */
3362 for (i = 0; i < dev->uc_count; i++) {
3363 bnx2_set_mac_addr(bp, uc_ptr->da_addr,
3364 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3365 sort_mode |= (1 <<
3366 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3367 uc_ptr = uc_ptr->next;
3368 }
3369
3370 }
3371
3372 if (rx_mode != bp->rx_mode) {
3373 bp->rx_mode = rx_mode;
3374 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3375 }
3376
3377 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3378 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3379 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3380
3381 spin_unlock_bh(&bp->phy_lock);
3382 }
3383
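/* Load firmware into one of the two RV2P processors.  Each 64-bit
 * instruction is written as a high/low register pair and then latched
 * at the right address with a PROC1 or PROC2 address command.
 */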
3384 static void
3385 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3386 u32 rv2p_proc)
3387 {
3388 int i;
3389 u32 val;
3390
3391 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3392 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3393 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3394 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3395 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3396 }
3397
3398 for (i = 0; i < rv2p_code_len; i += 8) {
3399 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3400 rv2p_code++;
3401 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3402 rv2p_code++;
3403
3404 if (rv2p_proc == RV2P_PROC1) {
3405 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3406 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3407 }
3408 else {
3409 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3410 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3411 }
3412 }
3413
3414 	/* Reset the processor; the un-stall is done later. */
3415 if (rv2p_proc == RV2P_PROC1) {
3416 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3417 }
3418 else {
3419 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3420 }
3421 }
3422
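/* Load firmware into one of the on-chip RISC processors: halt it,
 * copy the text (decompressing it first), data, SBSS, BSS and
 * read-only sections into its scratchpad via indirect writes, set the
 * program counter and release the halt.
 */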
3423 static int
3424 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3425 {
3426 u32 offset;
3427 u32 val;
3428 int rc;
3429
3430 /* Halt the CPU. */
3431 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3432 val |= cpu_reg->mode_value_halt;
3433 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3434 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3435
3436 /* Load the Text area. */
3437 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3438 if (fw->gz_text) {
3439 int j;
3440
3441 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3442 fw->gz_text_len);
3443 if (rc < 0)
3444 return rc;
3445
3446 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3447 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3448 }
3449 }
3450
3451 /* Load the Data area. */
3452 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3453 if (fw->data) {
3454 int j;
3455
3456 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3457 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3458 }
3459 }
3460
3461 /* Load the SBSS area. */
3462 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3463 if (fw->sbss_len) {
3464 int j;
3465
3466 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3467 bnx2_reg_wr_ind(bp, offset, 0);
3468 }
3469 }
3470
3471 /* Load the BSS area. */
3472 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3473 if (fw->bss_len) {
3474 int j;
3475
3476 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3477 bnx2_reg_wr_ind(bp, offset, 0);
3478 }
3479 }
3480
3481 /* Load the Read-Only area. */
3482 offset = cpu_reg->spad_base +
3483 (fw->rodata_addr - cpu_reg->mips_view_base);
3484 if (fw->rodata) {
3485 int j;
3486
3487 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3488 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3489 }
3490 }
3491
3492 /* Clear the pre-fetch instruction. */
3493 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3494 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3495
3496 /* Start the CPU. */
3497 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3498 val &= ~cpu_reg->mode_value_halt;
3499 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3500 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3501
3502 return 0;
3503 }
3504
3505 static int
3506 bnx2_init_cpus(struct bnx2 *bp)
3507 {
3508 struct fw_info *fw;
3509 int rc, rv2p_len;
3510 void *text, *rv2p;
3511
3512 /* Initialize the RV2P processor. */
3513 text = vmalloc(FW_BUF_SIZE);
3514 if (!text)
3515 return -ENOMEM;
3516 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3517 rv2p = bnx2_xi_rv2p_proc1;
3518 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3519 } else {
3520 rv2p = bnx2_rv2p_proc1;
3521 rv2p_len = sizeof(bnx2_rv2p_proc1);
3522 }
3523 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3524 if (rc < 0)
3525 goto init_cpu_err;
3526
3527 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3528
3529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530 rv2p = bnx2_xi_rv2p_proc2;
3531 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3532 } else {
3533 rv2p = bnx2_rv2p_proc2;
3534 rv2p_len = sizeof(bnx2_rv2p_proc2);
3535 }
3536 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3537 if (rc < 0)
3538 goto init_cpu_err;
3539
3540 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3541
3542 /* Initialize the RX Processor. */
3543 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3544 fw = &bnx2_rxp_fw_09;
3545 else
3546 fw = &bnx2_rxp_fw_06;
3547
3548 fw->text = text;
3549 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3550 if (rc)
3551 goto init_cpu_err;
3552
3553 /* Initialize the TX Processor. */
3554 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3555 fw = &bnx2_txp_fw_09;
3556 else
3557 fw = &bnx2_txp_fw_06;
3558
3559 fw->text = text;
3560 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3561 if (rc)
3562 goto init_cpu_err;
3563
3564 /* Initialize the TX Patch-up Processor. */
3565 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3566 fw = &bnx2_tpat_fw_09;
3567 else
3568 fw = &bnx2_tpat_fw_06;
3569
3570 fw->text = text;
3571 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3572 if (rc)
3573 goto init_cpu_err;
3574
3575 /* Initialize the Completion Processor. */
3576 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3577 fw = &bnx2_com_fw_09;
3578 else
3579 fw = &bnx2_com_fw_06;
3580
3581 fw->text = text;
3582 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3583 if (rc)
3584 goto init_cpu_err;
3585
3586 /* Initialize the Command Processor. */
3587 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3588 fw = &bnx2_cp_fw_09;
3589 else
3590 fw = &bnx2_cp_fw_06;
3591
3592 fw->text = text;
3593 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3594
3595 init_cpu_err:
3596 vfree(text);
3597 return rc;
3598 }
3599
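/* Transition the device between D0 and D3hot.  For D3hot with WoL
 * enabled, the PHY is restricted to 10/100 on copper, magic-packet
 * and ACPI receive modes are turned on, and the firmware is told
 * which suspend mode to use before PMCSR is written.
 */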
3600 static int
3601 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3602 {
3603 u16 pmcsr;
3604
3605 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3606
3607 switch (state) {
3608 case PCI_D0: {
3609 u32 val;
3610
3611 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3612 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3613 PCI_PM_CTRL_PME_STATUS);
3614
3615 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3616 /* delay required during transition out of D3hot */
3617 msleep(20);
3618
3619 val = REG_RD(bp, BNX2_EMAC_MODE);
3620 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3621 val &= ~BNX2_EMAC_MODE_MPKT;
3622 REG_WR(bp, BNX2_EMAC_MODE, val);
3623
3624 val = REG_RD(bp, BNX2_RPM_CONFIG);
3625 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3626 REG_WR(bp, BNX2_RPM_CONFIG, val);
3627 break;
3628 }
3629 case PCI_D3hot: {
3630 int i;
3631 u32 val, wol_msg;
3632
3633 if (bp->wol) {
3634 u32 advertising;
3635 u8 autoneg;
3636
3637 autoneg = bp->autoneg;
3638 advertising = bp->advertising;
3639
3640 if (bp->phy_port == PORT_TP) {
3641 bp->autoneg = AUTONEG_SPEED;
3642 bp->advertising = ADVERTISED_10baseT_Half |
3643 ADVERTISED_10baseT_Full |
3644 ADVERTISED_100baseT_Half |
3645 ADVERTISED_100baseT_Full |
3646 ADVERTISED_Autoneg;
3647 }
3648
3649 spin_lock_bh(&bp->phy_lock);
3650 bnx2_setup_phy(bp, bp->phy_port);
3651 spin_unlock_bh(&bp->phy_lock);
3652
3653 bp->autoneg = autoneg;
3654 bp->advertising = advertising;
3655
3656 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3657
3658 val = REG_RD(bp, BNX2_EMAC_MODE);
3659
3660 /* Enable port mode. */
3661 val &= ~BNX2_EMAC_MODE_PORT;
3662 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3663 BNX2_EMAC_MODE_ACPI_RCVD |
3664 BNX2_EMAC_MODE_MPKT;
3665 if (bp->phy_port == PORT_TP)
3666 val |= BNX2_EMAC_MODE_PORT_MII;
3667 else {
3668 val |= BNX2_EMAC_MODE_PORT_GMII;
3669 if (bp->line_speed == SPEED_2500)
3670 val |= BNX2_EMAC_MODE_25G_MODE;
3671 }
3672
3673 REG_WR(bp, BNX2_EMAC_MODE, val);
3674
3675 /* receive all multicast */
3676 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3677 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3678 0xffffffff);
3679 }
3680 REG_WR(bp, BNX2_EMAC_RX_MODE,
3681 BNX2_EMAC_RX_MODE_SORT_MODE);
3682
3683 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3684 BNX2_RPM_SORT_USER0_MC_EN;
3685 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3686 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3687 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3688 BNX2_RPM_SORT_USER0_ENA);
3689
3690 /* Need to enable EMAC and RPM for WOL. */
3691 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3692 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3693 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3694 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3695
3696 val = REG_RD(bp, BNX2_RPM_CONFIG);
3697 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3698 REG_WR(bp, BNX2_RPM_CONFIG, val);
3699
3700 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3701 }
3702 else {
3703 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3704 }
3705
3706 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3707 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3708 1, 0);
3709
3710 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3711 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3712 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3713
3714 if (bp->wol)
3715 pmcsr |= 3;
3716 }
3717 else {
3718 pmcsr |= 3;
3719 }
3720 if (bp->wol) {
3721 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3722 }
3723 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3724 pmcsr);
3725
3726 /* No more memory access after this point until
3727 * device is brought back to D0.
3728 */
3729 udelay(50);
3730 break;
3731 }
3732 default:
3733 return -EINVAL;
3734 }
3735 return 0;
3736 }
3737
3738 static int
3739 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3740 {
3741 u32 val;
3742 int j;
3743
3744 /* Request access to the flash interface. */
3745 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3746 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3747 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3748 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3749 break;
3750
3751 udelay(5);
3752 }
3753
3754 if (j >= NVRAM_TIMEOUT_COUNT)
3755 return -EBUSY;
3756
3757 return 0;
3758 }
3759
3760 static int
3761 bnx2_release_nvram_lock(struct bnx2 *bp)
3762 {
3763 int j;
3764 u32 val;
3765
3766 /* Relinquish nvram interface. */
3767 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3768
3769 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3770 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3771 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3772 break;
3773
3774 udelay(5);
3775 }
3776
3777 if (j >= NVRAM_TIMEOUT_COUNT)
3778 return -EBUSY;
3779
3780 return 0;
3781 }
3782
3783
3784 static int
3785 bnx2_enable_nvram_write(struct bnx2 *bp)
3786 {
3787 u32 val;
3788
3789 val = REG_RD(bp, BNX2_MISC_CFG);
3790 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3791
3792 if (bp->flash_info->flags & BNX2_NV_WREN) {
3793 int j;
3794
3795 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3796 REG_WR(bp, BNX2_NVM_COMMAND,
3797 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3798
3799 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3800 udelay(5);
3801
3802 val = REG_RD(bp, BNX2_NVM_COMMAND);
3803 if (val & BNX2_NVM_COMMAND_DONE)
3804 break;
3805 }
3806
3807 if (j >= NVRAM_TIMEOUT_COUNT)
3808 return -EBUSY;
3809 }
3810 return 0;
3811 }
3812
3813 static void
3814 bnx2_disable_nvram_write(struct bnx2 *bp)
3815 {
3816 u32 val;
3817
3818 val = REG_RD(bp, BNX2_MISC_CFG);
3819 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3820 }
3821
3822
3823 static void
3824 bnx2_enable_nvram_access(struct bnx2 *bp)
3825 {
3826 u32 val;
3827
3828 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3829 /* Enable both bits, even on read. */
3830 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3831 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3832 }
3833
3834 static void
3835 bnx2_disable_nvram_access(struct bnx2 *bp)
3836 {
3837 u32 val;
3838
3839 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3840 /* Disable both bits, even after read. */
3841 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3842 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3843 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3844 }
3845
3846 static int
3847 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3848 {
3849 u32 cmd;
3850 int j;
3851
3852 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3853 /* Buffered flash, no erase needed */
3854 return 0;
3855
3856 /* Build an erase command */
3857 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3858 BNX2_NVM_COMMAND_DOIT;
3859
3860 /* Need to clear DONE bit separately. */
3861 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3862
3863 	/* Address of the NVRAM page to erase. */
3864 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3865
3866 /* Issue an erase command. */
3867 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3868
3869 /* Wait for completion. */
3870 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3871 u32 val;
3872
3873 udelay(5);
3874
3875 val = REG_RD(bp, BNX2_NVM_COMMAND);
3876 if (val & BNX2_NVM_COMMAND_DONE)
3877 break;
3878 }
3879
3880 if (j >= NVRAM_TIMEOUT_COUNT)
3881 return -EBUSY;
3882
3883 return 0;
3884 }
3885
3886 static int
3887 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3888 {
3889 u32 cmd;
3890 int j;
3891
3892 /* Build the command word. */
3893 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3894
3895 	/* Translate the offset for buffered flash; not needed on the 5709. */
3896 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3897 offset = ((offset / bp->flash_info->page_size) <<
3898 bp->flash_info->page_bits) +
3899 (offset % bp->flash_info->page_size);
3900 }
3901
3902 /* Need to clear DONE bit separately. */
3903 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3904
3905 /* Address of the NVRAM to read from. */
3906 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3907
3908 /* Issue a read command. */
3909 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3910
3911 /* Wait for completion. */
3912 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3913 u32 val;
3914
3915 udelay(5);
3916
3917 val = REG_RD(bp, BNX2_NVM_COMMAND);
3918 if (val & BNX2_NVM_COMMAND_DONE) {
3919 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3920 memcpy(ret_val, &v, 4);
3921 break;
3922 }
3923 }
3924 if (j >= NVRAM_TIMEOUT_COUNT)
3925 return -EBUSY;
3926
3927 return 0;
3928 }
3929
3930
3931 static int
3932 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3933 {
3934 u32 cmd;
3935 __be32 val32;
3936 int j;
3937
3938 /* Build the command word. */
3939 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3940
3941 	/* Translate the offset for buffered flash; not needed on the 5709. */
3942 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3943 offset = ((offset / bp->flash_info->page_size) <<
3944 bp->flash_info->page_bits) +
3945 (offset % bp->flash_info->page_size);
3946 }
3947
3948 /* Need to clear DONE bit separately. */
3949 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3950
3951 memcpy(&val32, val, 4);
3952
3953 /* Write the data. */
3954 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3955
3956 /* Address of the NVRAM to write to. */
3957 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3958
3959 /* Issue the write command. */
3960 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3961
3962 /* Wait for completion. */
3963 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3964 udelay(5);
3965
3966 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3967 break;
3968 }
3969 if (j >= NVRAM_TIMEOUT_COUNT)
3970 return -EBUSY;
3971
3972 return 0;
3973 }
3974
3975 static int
3976 bnx2_init_nvram(struct bnx2 *bp)
3977 {
3978 u32 val;
3979 int j, entry_count, rc = 0;
3980 struct flash_spec *flash;
3981
3982 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3983 bp->flash_info = &flash_5709;
3984 goto get_flash_size;
3985 }
3986
3987 /* Determine the selected interface. */
3988 val = REG_RD(bp, BNX2_NVM_CFG1);
3989
3990 entry_count = ARRAY_SIZE(flash_table);
3991
3992 if (val & 0x40000000) {
3993
3994 /* Flash interface has been reconfigured */
3995 for (j = 0, flash = &flash_table[0]; j < entry_count;
3996 j++, flash++) {
3997 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3998 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3999 bp->flash_info = flash;
4000 break;
4001 }
4002 }
4003 }
4004 else {
4005 u32 mask;
4006 /* Not yet been reconfigured */
4007
4008 if (val & (1 << 23))
4009 mask = FLASH_BACKUP_STRAP_MASK;
4010 else
4011 mask = FLASH_STRAP_MASK;
4012
4013 for (j = 0, flash = &flash_table[0]; j < entry_count;
4014 j++, flash++) {
4015
4016 if ((val & mask) == (flash->strapping & mask)) {
4017 bp->flash_info = flash;
4018
4019 /* Request access to the flash interface. */
4020 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4021 return rc;
4022
4023 /* Enable access to flash interface */
4024 bnx2_enable_nvram_access(bp);
4025
4026 /* Reconfigure the flash interface */
4027 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4028 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4029 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4030 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4031
4032 /* Disable access to flash interface */
4033 bnx2_disable_nvram_access(bp);
4034 bnx2_release_nvram_lock(bp);
4035
4036 break;
4037 }
4038 }
4039 } /* if (val & 0x40000000) */
4040
4041 if (j == entry_count) {
4042 bp->flash_info = NULL;
4043 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4044 return -ENODEV;
4045 }
4046
4047 get_flash_size:
4048 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4049 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4050 if (val)
4051 bp->flash_size = val;
4052 else
4053 bp->flash_size = bp->flash_info->total_size;
4054
4055 return rc;
4056 }
4057
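/* Read an arbitrary byte range from NVRAM.  Unaligned leading and
 * trailing bytes are handled with extra dword reads, and the FIRST
 * and LAST command flags bracket the burst for the flash interface.
 */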
4058 static int
4059 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4060 int buf_size)
4061 {
4062 int rc = 0;
4063 u32 cmd_flags, offset32, len32, extra;
4064
4065 if (buf_size == 0)
4066 return 0;
4067
4068 /* Request access to the flash interface. */
4069 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4070 return rc;
4071
4072 /* Enable access to flash interface */
4073 bnx2_enable_nvram_access(bp);
4074
4075 len32 = buf_size;
4076 offset32 = offset;
4077 extra = 0;
4078
4079 cmd_flags = 0;
4080
4081 if (offset32 & 3) {
4082 u8 buf[4];
4083 u32 pre_len;
4084
4085 offset32 &= ~3;
4086 pre_len = 4 - (offset & 3);
4087
4088 if (pre_len >= len32) {
4089 pre_len = len32;
4090 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4091 BNX2_NVM_COMMAND_LAST;
4092 }
4093 else {
4094 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4095 }
4096
4097 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4098
4099 if (rc)
4100 return rc;
4101
4102 memcpy(ret_buf, buf + (offset & 3), pre_len);
4103
4104 offset32 += 4;
4105 ret_buf += pre_len;
4106 len32 -= pre_len;
4107 }
4108 if (len32 & 3) {
4109 extra = 4 - (len32 & 3);
4110 len32 = (len32 + 4) & ~3;
4111 }
4112
4113 if (len32 == 4) {
4114 u8 buf[4];
4115
4116 if (cmd_flags)
4117 cmd_flags = BNX2_NVM_COMMAND_LAST;
4118 else
4119 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4120 BNX2_NVM_COMMAND_LAST;
4121
4122 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4123
4124 memcpy(ret_buf, buf, 4 - extra);
4125 }
4126 else if (len32 > 0) {
4127 u8 buf[4];
4128
4129 /* Read the first word. */
4130 if (cmd_flags)
4131 cmd_flags = 0;
4132 else
4133 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4134
4135 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4136
4137 /* Advance to the next dword. */
4138 offset32 += 4;
4139 ret_buf += 4;
4140 len32 -= 4;
4141
4142 while (len32 > 4 && rc == 0) {
4143 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4144
4145 /* Advance to the next dword. */
4146 offset32 += 4;
4147 ret_buf += 4;
4148 len32 -= 4;
4149 }
4150
4151 if (rc)
4152 return rc;
4153
4154 cmd_flags = BNX2_NVM_COMMAND_LAST;
4155 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4156
4157 memcpy(ret_buf, buf, 4 - extra);
4158 }
4159
4160 /* Disable access to flash interface */
4161 bnx2_disable_nvram_access(bp);
4162
4163 bnx2_release_nvram_lock(bp);
4164
4165 return rc;
4166 }
4167
4168 static int
4169 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4170 int buf_size)
4171 {
4172 u32 written, offset32, len32;
4173 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4174 int rc = 0;
4175 int align_start, align_end;
4176
4177 buf = data_buf;
4178 offset32 = offset;
4179 len32 = buf_size;
4180 align_start = align_end = 0;
4181
4182 if ((align_start = (offset32 & 3))) {
4183 offset32 &= ~3;
4184 len32 += align_start;
4185 if (len32 < 4)
4186 len32 = 4;
4187 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4188 return rc;
4189 }
4190
4191 if (len32 & 3) {
4192 align_end = 4 - (len32 & 3);
4193 len32 += align_end;
4194 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4195 return rc;
4196 }
4197
4198 if (align_start || align_end) {
4199 align_buf = kmalloc(len32, GFP_KERNEL);
4200 if (align_buf == NULL)
4201 return -ENOMEM;
4202 if (align_start) {
4203 memcpy(align_buf, start, 4);
4204 }
4205 if (align_end) {
4206 memcpy(align_buf + len32 - 4, end, 4);
4207 }
4208 memcpy(align_buf + align_start, data_buf, buf_size);
4209 buf = align_buf;
4210 }
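	/* Illustrative case: writing 3 bytes at offset 5 gives align_start = 1,
	 * so the dword at offset 4 was read into start[] above and its first
	 * byte is preserved in align_buf ahead of the caller's data, keeping
	 * the write dword-aligned without clobbering the neighbouring byte.
	 */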
4211
4212 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4213 flash_buffer = kmalloc(264, GFP_KERNEL);
4214 if (flash_buffer == NULL) {
4215 rc = -ENOMEM;
4216 goto nvram_write_end;
4217 }
4218 }
4219
4220 written = 0;
4221 while ((written < len32) && (rc == 0)) {
4222 u32 page_start, page_end, data_start, data_end;
4223 u32 addr, cmd_flags;
4224 int i;
4225
4226 /* Find the page_start addr */
4227 page_start = offset32 + written;
4228 page_start -= (page_start % bp->flash_info->page_size);
4229 /* Find the page_end addr */
4230 page_end = page_start + bp->flash_info->page_size;
4231 /* Find the data_start addr */
4232 data_start = (written == 0) ? offset32 : page_start;
4233 /* Find the data_end addr */
4234 data_end = (page_end > offset32 + len32) ?
4235 (offset32 + len32) : page_end;
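		/* Example (assuming a 264-byte page, matching the buffer
		 * allocated above): a 16-byte write at offset 260 spans two
		 * pages; pass 1 covers bytes 260-263 of page 0, pass 2 covers
		 * bytes 264-275 of page 1, with the rest of each page
		 * rewritten from flash_buffer on non-buffered parts.
		 */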
4236
4237 /* Request access to the flash interface. */
4238 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4239 goto nvram_write_end;
4240
4241 /* Enable access to flash interface */
4242 bnx2_enable_nvram_access(bp);
4243
4244 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4245 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4246 int j;
4247
4248 /* Read the whole page into the buffer
4249 			 * (non-buffered flash only) */
4250 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4251 if (j == (bp->flash_info->page_size - 4)) {
4252 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4253 }
4254 rc = bnx2_nvram_read_dword(bp,
4255 page_start + j,
4256 &flash_buffer[j],
4257 cmd_flags);
4258
4259 if (rc)
4260 goto nvram_write_end;
4261
4262 cmd_flags = 0;
4263 }
4264 }
4265
4266 /* Enable writes to flash interface (unlock write-protect) */
4267 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4268 goto nvram_write_end;
4269
4270 /* Loop to write back the buffer data from page_start to
4271 * data_start */
4272 i = 0;
4273 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4274 /* Erase the page */
4275 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4276 goto nvram_write_end;
4277
4278 			/* Re-enable writes for the actual write */
4279 bnx2_enable_nvram_write(bp);
4280
4281 for (addr = page_start; addr < data_start;
4282 addr += 4, i += 4) {
4283
4284 rc = bnx2_nvram_write_dword(bp, addr,
4285 &flash_buffer[i], cmd_flags);
4286
4287 if (rc != 0)
4288 goto nvram_write_end;
4289
4290 cmd_flags = 0;
4291 }
4292 }
4293
4294 /* Loop to write the new data from data_start to data_end */
4295 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4296 if ((addr == page_end - 4) ||
4297 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4298 (addr == data_end - 4))) {
4299
4300 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4301 }
4302 rc = bnx2_nvram_write_dword(bp, addr, buf,
4303 cmd_flags);
4304
4305 if (rc != 0)
4306 goto nvram_write_end;
4307
4308 cmd_flags = 0;
4309 buf += 4;
4310 }
4311
4312 /* Loop to write back the buffer data from data_end
4313 * to page_end */
4314 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4315 for (addr = data_end; addr < page_end;
4316 addr += 4, i += 4) {
4317
4318 if (addr == page_end-4) {
4319 cmd_flags = BNX2_NVM_COMMAND_LAST;
4320 }
4321 rc = bnx2_nvram_write_dword(bp, addr,
4322 &flash_buffer[i], cmd_flags);
4323
4324 if (rc != 0)
4325 goto nvram_write_end;
4326
4327 cmd_flags = 0;
4328 }
4329 }
4330
4331 /* Disable writes to flash interface (lock write-protect) */
4332 bnx2_disable_nvram_write(bp);
4333
4334 /* Disable access to flash interface */
4335 bnx2_disable_nvram_access(bp);
4336 bnx2_release_nvram_lock(bp);
4337
4338 /* Increment written */
4339 written += data_end - data_start;
4340 }
4341
4342 nvram_write_end:
4343 kfree(flash_buffer);
4344 kfree(align_buf);
4345 return rc;
4346 }
4347
4348 static void
4349 bnx2_init_fw_cap(struct bnx2 *bp)
4350 {
4351 u32 val, sig = 0;
4352
4353 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4354 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4355
4356 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4357 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4358
4359 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4360 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4361 return;
4362
4363 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4364 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4365 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4366 }
4367
4368 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4369 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4370 u32 link;
4371
4372 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4373
4374 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4375 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4376 bp->phy_port = PORT_FIBRE;
4377 else
4378 bp->phy_port = PORT_TP;
4379
4380 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4381 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4382 }
4383
4384 if (netif_running(bp->dev) && sig)
4385 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4386 }
4387
4388 static void
4389 bnx2_setup_msix_tbl(struct bnx2 *bp)
4390 {
4391 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4392
4393 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4394 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4395 }
4396
4397 static int
4398 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4399 {
4400 u32 val;
4401 int i, rc = 0;
4402 u8 old_port;
4403
4404 /* Wait for the current PCI transaction to complete before
4405 * issuing a reset. */
4406 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4407 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4408 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4409 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4410 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4411 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4412 udelay(5);
4413
4414 /* Wait for the firmware to tell us it is ok to issue a reset. */
4415 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4416
4417 /* Deposit a driver reset signature so the firmware knows that
4418 * this is a soft reset. */
4419 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4420 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4421
4422 	/* Do a dummy read to force the chip to complete all current transactions
4423 * before we issue a reset. */
4424 val = REG_RD(bp, BNX2_MISC_ID);
4425
4426 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4427 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4428 REG_RD(bp, BNX2_MISC_COMMAND);
4429 udelay(5);
4430
4431 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4432 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4433
4434 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4435
4436 } else {
4437 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4438 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4439 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4440
4441 /* Chip reset. */
4442 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4443
4444 /* Reading back any register after chip reset will hang the
4445 * bus on 5706 A0 and A1. The msleep below provides plenty
4446 * of margin for write posting.
4447 */
4448 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4449 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4450 msleep(20);
4451
4452 		/* Reset takes approximately 30 usec */
4453 for (i = 0; i < 10; i++) {
4454 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4455 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4456 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4457 break;
4458 udelay(10);
4459 }
4460
4461 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4462 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4463 printk(KERN_ERR PFX "Chip reset did not complete\n");
4464 return -EBUSY;
4465 }
4466 }
4467
4468 /* Make sure byte swapping is properly configured. */
4469 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4470 if (val != 0x01020304) {
4471 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4472 return -ENODEV;
4473 }
4474
4475 /* Wait for the firmware to finish its initialization. */
4476 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4477 if (rc)
4478 return rc;
4479
4480 spin_lock_bh(&bp->phy_lock);
4481 old_port = bp->phy_port;
4482 bnx2_init_fw_cap(bp);
4483 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4484 old_port != bp->phy_port)
4485 bnx2_set_default_remote_link(bp);
4486 spin_unlock_bh(&bp->phy_lock);
4487
4488 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4489 		/* Adjust the voltage regulator to two steps lower. The default
4490 * of this register is 0x0000000e. */
4491 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4492
4493 /* Remove bad rbuf memory from the free pool. */
4494 rc = bnx2_alloc_bad_rbuf(bp);
4495 }
4496
4497 if (bp->flags & BNX2_FLAG_USING_MSIX)
4498 bnx2_setup_msix_tbl(bp);
4499
4500 return rc;
4501 }
4502
4503 static int
4504 bnx2_init_chip(struct bnx2 *bp)
4505 {
4506 u32 val, mtu;
4507 int rc, i;
4508
4509 /* Make sure the interrupt is not active. */
4510 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4511
4512 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4513 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4514 #ifdef __BIG_ENDIAN
4515 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4516 #endif
4517 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4518 DMA_READ_CHANS << 12 |
4519 DMA_WRITE_CHANS << 16;
4520
4521 val |= (0x2 << 20) | (1 << 11);
4522
4523 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4524 val |= (1 << 23);
4525
4526 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4527 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4528 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4529
4530 REG_WR(bp, BNX2_DMA_CONFIG, val);
4531
4532 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4533 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4534 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4535 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4536 }
4537
4538 if (bp->flags & BNX2_FLAG_PCIX) {
4539 u16 val16;
4540
4541 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4542 &val16);
4543 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4544 val16 & ~PCI_X_CMD_ERO);
4545 }
4546
4547 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4548 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4549 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4550 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4551
4552 /* Initialize context mapping and zero out the quick contexts. The
4553 * context block must have already been enabled. */
4554 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4555 rc = bnx2_init_5709_context(bp);
4556 if (rc)
4557 return rc;
4558 } else
4559 bnx2_init_context(bp);
4560
4561 if ((rc = bnx2_init_cpus(bp)) != 0)
4562 return rc;
4563
4564 bnx2_init_nvram(bp);
4565
4566 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4567
4568 val = REG_RD(bp, BNX2_MQ_CONFIG);
4569 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4570 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4571 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4572 val |= BNX2_MQ_CONFIG_HALT_DIS;
4573
4574 REG_WR(bp, BNX2_MQ_CONFIG, val);
4575
4576 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4577 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4578 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4579
4580 val = (BCM_PAGE_BITS - 8) << 24;
4581 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4582
4583 /* Configure page size. */
4584 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4585 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4586 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4587 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4588
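	/* Seed the transmit backoff state from the MAC address (presumably so
	 * that NICs sharing a segment pick different collision backoff slots).
	 */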
4589 val = bp->mac_addr[0] +
4590 (bp->mac_addr[1] << 8) +
4591 (bp->mac_addr[2] << 16) +
4592 bp->mac_addr[3] +
4593 (bp->mac_addr[4] << 8) +
4594 (bp->mac_addr[5] << 16);
4595 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4596
4597 /* Program the MTU. Also include 4 bytes for CRC32. */
4598 mtu = bp->dev->mtu;
4599 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4600 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4601 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4602 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4603
4604 if (mtu < 1500)
4605 mtu = 1500;
4606
4607 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4608 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4609 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4610
4611 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4612 bp->bnx2_napi[i].last_status_idx = 0;
4613
4614 bp->idle_chk_status_idx = 0xffff;
4615
4616 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4617
4618 /* Set up how to generate a link change interrupt. */
4619 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4620
4621 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4622 (u64) bp->status_blk_mapping & 0xffffffff);
4623 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4624
4625 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4626 (u64) bp->stats_blk_mapping & 0xffffffff);
4627 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4628 (u64) bp->stats_blk_mapping >> 32);
4629
4630 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4631 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4632
4633 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4634 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4635
4636 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4637 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4638
4639 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4640
4641 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4642
4643 REG_WR(bp, BNX2_HC_COM_TICKS,
4644 (bp->com_ticks_int << 16) | bp->com_ticks);
4645
4646 REG_WR(bp, BNX2_HC_CMD_TICKS,
4647 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4648
4649 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4650 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4651 else
4652 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4653 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4654
4655 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4656 val = BNX2_HC_CONFIG_COLLECT_STATS;
4657 else {
4658 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4659 BNX2_HC_CONFIG_COLLECT_STATS;
4660 }
4661
4662 if (bp->irq_nvecs > 1) {
4663 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4664 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4665
4666 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4667 }
4668
4669 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4670 val |= BNX2_HC_CONFIG_ONE_SHOT;
4671
4672 REG_WR(bp, BNX2_HC_CONFIG, val);
4673
4674 for (i = 1; i < bp->irq_nvecs; i++) {
4675 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4676 BNX2_HC_SB_CONFIG_1;
4677
4678 REG_WR(bp, base,
4679 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4680 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4681 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4682
4683 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4684 (bp->tx_quick_cons_trip_int << 16) |
4685 bp->tx_quick_cons_trip);
4686
4687 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4688 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4689
4690 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4691 (bp->rx_quick_cons_trip_int << 16) |
4692 bp->rx_quick_cons_trip);
4693
4694 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4695 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4696 }
4697
4698 /* Clear internal stats counters. */
4699 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4700
4701 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4702
4703 /* Initialize the receive filter. */
4704 bnx2_set_rx_mode(bp->dev);
4705
4706 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4707 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4708 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4709 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4710 }
4711 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4712 1, 0);
4713
4714 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4715 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4716
4717 udelay(20);
4718
4719 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4720
4721 return rc;
4722 }
4723
4724 static void
4725 bnx2_clear_ring_states(struct bnx2 *bp)
4726 {
4727 struct bnx2_napi *bnapi;
4728 struct bnx2_tx_ring_info *txr;
4729 struct bnx2_rx_ring_info *rxr;
4730 int i;
4731
4732 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4733 bnapi = &bp->bnx2_napi[i];
4734 txr = &bnapi->tx_ring;
4735 rxr = &bnapi->rx_ring;
4736
4737 txr->tx_cons = 0;
4738 txr->hw_tx_cons = 0;
4739 rxr->rx_prod_bseq = 0;
4740 rxr->rx_prod = 0;
4741 rxr->rx_cons = 0;
4742 rxr->rx_pg_prod = 0;
4743 rxr->rx_pg_cons = 0;
4744 }
4745 }
4746
4747 static void
4748 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4749 {
4750 u32 val, offset0, offset1, offset2, offset3;
4751 u32 cid_addr = GET_CID_ADDR(cid);
4752
4753 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4754 offset0 = BNX2_L2CTX_TYPE_XI;
4755 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4756 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4757 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4758 } else {
4759 offset0 = BNX2_L2CTX_TYPE;
4760 offset1 = BNX2_L2CTX_CMD_TYPE;
4761 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4762 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4763 }
4764 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4765 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4766
4767 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4768 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4769
4770 val = (u64) txr->tx_desc_mapping >> 32;
4771 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4772
4773 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4774 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4775 }
4776
4777 static void
4778 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4779 {
4780 struct tx_bd *txbd;
4781 u32 cid = TX_CID;
4782 struct bnx2_napi *bnapi;
4783 struct bnx2_tx_ring_info *txr;
4784
4785 bnapi = &bp->bnx2_napi[ring_num];
4786 txr = &bnapi->tx_ring;
4787
4788 if (ring_num == 0)
4789 cid = TX_CID;
4790 else
4791 cid = TX_TSS_CID + ring_num - 1;
4792
4793 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4794
4795 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4796
4797 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4798 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4799
4800 txr->tx_prod = 0;
4801 txr->tx_prod_bseq = 0;
4802
4803 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4804 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4805
4806 bnx2_init_tx_context(bp, cid, txr);
4807 }
4808
4809 static void
4810 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4811 int num_rings)
4812 {
4813 int i;
4814 struct rx_bd *rxbd;
4815
4816 for (i = 0; i < num_rings; i++) {
4817 int j;
4818
4819 rxbd = &rx_ring[i][0];
4820 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4821 rxbd->rx_bd_len = buf_size;
4822 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4823 }
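		/* The final BD of each ring page is not a buffer descriptor;
		 * below it is pointed at the next page (wrapping from the last
		 * page back to the first), so the pages form one circular
		 * chain.
		 */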
4824 if (i == (num_rings - 1))
4825 j = 0;
4826 else
4827 j = i + 1;
4828 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4829 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4830 }
4831 }
4832
4833 static void
4834 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4835 {
4836 int i;
4837 u16 prod, ring_prod;
4838 u32 cid, rx_cid_addr, val;
4839 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4840 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4841
4842 if (ring_num == 0)
4843 cid = RX_CID;
4844 else
4845 cid = RX_RSS_CID + ring_num - 1;
4846
4847 rx_cid_addr = GET_CID_ADDR(cid);
4848
4849 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4850 bp->rx_buf_use_size, bp->rx_max_ring);
4851
4852 bnx2_init_rx_context(bp, cid);
4853
4854 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4855 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4856 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4857 }
4858
4859 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4860 if (bp->rx_pg_ring_size) {
4861 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4862 rxr->rx_pg_desc_mapping,
4863 PAGE_SIZE, bp->rx_max_pg_ring);
4864 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4865 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4866 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4867 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4868
4869 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4870 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4871
4872 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4873 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4874
4875 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4876 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4877 }
4878
4879 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4880 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4881
4882 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4883 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4884
4885 ring_prod = prod = rxr->rx_pg_prod;
4886 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4887 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4888 break;
4889 prod = NEXT_RX_BD(prod);
4890 ring_prod = RX_PG_RING_IDX(prod);
4891 }
4892 rxr->rx_pg_prod = prod;
4893
4894 ring_prod = prod = rxr->rx_prod;
4895 for (i = 0; i < bp->rx_ring_size; i++) {
4896 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4897 break;
4898 prod = NEXT_RX_BD(prod);
4899 ring_prod = RX_RING_IDX(prod);
4900 }
4901 rxr->rx_prod = prod;
4902
4903 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4904 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4905 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4906
4907 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4908 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4909
4910 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4911 }
4912
4913 static void
4914 bnx2_init_all_rings(struct bnx2 *bp)
4915 {
4916 int i;
4917 u32 val;
4918
4919 bnx2_clear_ring_states(bp);
4920
4921 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4922 for (i = 0; i < bp->num_tx_rings; i++)
4923 bnx2_init_tx_ring(bp, i);
4924
4925 if (bp->num_tx_rings > 1)
4926 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4927 (TX_TSS_CID << 7));
4928
4929 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4930 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4931
4932 for (i = 0; i < bp->num_rx_rings; i++)
4933 bnx2_init_rx_ring(bp, i);
4934
4935 if (bp->num_rx_rings > 1) {
4936 u32 tbl_32;
4937 u8 *tbl = (u8 *) &tbl_32;
4938
4939 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4940 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
4941
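		/* Fill the RSS indirection table: entries cycle through
		 * 0 .. num_rx_rings-2, packed four per 32-bit word and written
		 * through the indirect scratch interface one word at a time.
		 */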
4942 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4943 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4944 if ((i % 4) == 3)
4945 bnx2_reg_wr_ind(bp,
4946 BNX2_RXP_SCRATCH_RSS_TBL + i,
4947 cpu_to_be32(tbl_32));
4948 }
4949
4950 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4951 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4952
4953 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
4954
4955 }
4956 }
4957
4958 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4959 {
4960 u32 max, num_rings = 1;
4961
4962 while (ring_size > MAX_RX_DESC_CNT) {
4963 ring_size -= MAX_RX_DESC_CNT;
4964 num_rings++;
4965 }
4966 /* round to next power of 2 */
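	/* e.g. a ring_size needing 3 pages with max_size = 8: max shrinks
	 * 8 -> 4 -> 2, and since 3 is not itself a power of two the final
	 * doubling below returns 4.
	 */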
4967 max = max_size;
4968 while ((max & num_rings) == 0)
4969 max >>= 1;
4970
4971 if (num_rings != max)
4972 max <<= 1;
4973
4974 return max;
4975 }
4976
4977 static void
4978 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4979 {
4980 u32 rx_size, rx_space, jumbo_size;
4981
4982 /* 8 for CRC and VLAN */
4983 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4984
4985 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4986 sizeof(struct skb_shared_info);
4987
4988 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4989 bp->rx_pg_ring_size = 0;
4990 bp->rx_max_pg_ring = 0;
4991 bp->rx_max_pg_ring_idx = 0;
4992 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4993 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
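		/* e.g. with 4 KiB pages and an MTU of 9000:
		 * PAGE_ALIGN(8960) = 12288, i.e. 3 pages per jumbo frame. */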
4994
4995 jumbo_size = size * pages;
4996 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4997 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4998
4999 bp->rx_pg_ring_size = jumbo_size;
5000 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5001 MAX_RX_PG_RINGS);
5002 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5003 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5004 bp->rx_copy_thresh = 0;
5005 }
5006
5007 bp->rx_buf_use_size = rx_size;
5008 /* hw alignment */
5009 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5010 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5011 bp->rx_ring_size = size;
5012 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5013 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5014 }
5015
5016 static void
5017 bnx2_free_tx_skbs(struct bnx2 *bp)
5018 {
5019 int i;
5020
5021 for (i = 0; i < bp->num_tx_rings; i++) {
5022 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5023 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5024 int j;
5025
5026 if (txr->tx_buf_ring == NULL)
5027 continue;
5028
5029 for (j = 0; j < TX_DESC_CNT; ) {
5030 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5031 struct sk_buff *skb = tx_buf->skb;
5032
5033 if (skb == NULL) {
5034 j++;
5035 continue;
5036 }
5037
5038 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5039
5040 tx_buf->skb = NULL;
5041
5042 j += skb_shinfo(skb)->nr_frags + 1;
5043 dev_kfree_skb(skb);
5044 }
5045 }
5046 }
5047
5048 static void
5049 bnx2_free_rx_skbs(struct bnx2 *bp)
5050 {
5051 int i;
5052
5053 for (i = 0; i < bp->num_rx_rings; i++) {
5054 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5055 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5056 int j;
5057
5058 if (rxr->rx_buf_ring == NULL)
5059 return;
5060
5061 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5062 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5063 struct sk_buff *skb = rx_buf->skb;
5064
5065 if (skb == NULL)
5066 continue;
5067
5068 pci_unmap_single(bp->pdev,
5069 pci_unmap_addr(rx_buf, mapping),
5070 bp->rx_buf_use_size,
5071 PCI_DMA_FROMDEVICE);
5072
5073 rx_buf->skb = NULL;
5074
5075 dev_kfree_skb(skb);
5076 }
5077 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5078 bnx2_free_rx_page(bp, rxr, j);
5079 }
5080 }
5081
5082 static void
5083 bnx2_free_skbs(struct bnx2 *bp)
5084 {
5085 bnx2_free_tx_skbs(bp);
5086 bnx2_free_rx_skbs(bp);
5087 }
5088
5089 static int
5090 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5091 {
5092 int rc;
5093
5094 rc = bnx2_reset_chip(bp, reset_code);
5095 bnx2_free_skbs(bp);
5096 if (rc)
5097 return rc;
5098
5099 if ((rc = bnx2_init_chip(bp)) != 0)
5100 return rc;
5101
5102 bnx2_init_all_rings(bp);
5103 return 0;
5104 }
5105
5106 static int
5107 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5108 {
5109 int rc;
5110
5111 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5112 return rc;
5113
5114 spin_lock_bh(&bp->phy_lock);
5115 bnx2_init_phy(bp, reset_phy);
5116 bnx2_set_link(bp);
5117 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5118 bnx2_remote_phy_event(bp);
5119 spin_unlock_bh(&bp->phy_lock);
5120 return 0;
5121 }
5122
5123 static int
5124 bnx2_shutdown_chip(struct bnx2 *bp)
5125 {
5126 u32 reset_code;
5127
5128 if (bp->flags & BNX2_FLAG_NO_WOL)
5129 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5130 else if (bp->wol)
5131 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5132 else
5133 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5134
5135 return bnx2_reset_chip(bp, reset_code);
5136 }
5137
5138 static int
5139 bnx2_test_registers(struct bnx2 *bp)
5140 {
5141 int ret;
5142 int i, is_5709;
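	/* Each entry is probed below by writing 0 and then all-ones: bits in
	 * rw_mask must take both values, while bits in ro_mask must keep the
	 * value saved before the test.
	 */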
5143 static const struct {
5144 u16 offset;
5145 u16 flags;
5146 #define BNX2_FL_NOT_5709 1
5147 u32 rw_mask;
5148 u32 ro_mask;
5149 } reg_tbl[] = {
5150 { 0x006c, 0, 0x00000000, 0x0000003f },
5151 { 0x0090, 0, 0xffffffff, 0x00000000 },
5152 { 0x0094, 0, 0x00000000, 0x00000000 },
5153
5154 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5155 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5156 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5157 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5158 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5159 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5160 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5161 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5162 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5163
5164 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5165 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5166 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5167 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5168 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5169 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5170
5171 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5172 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5173 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5174
5175 { 0x1000, 0, 0x00000000, 0x00000001 },
5176 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5177
5178 { 0x1408, 0, 0x01c00800, 0x00000000 },
5179 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5180 { 0x14a8, 0, 0x00000000, 0x000001ff },
5181 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5182 { 0x14b0, 0, 0x00000002, 0x00000001 },
5183 { 0x14b8, 0, 0x00000000, 0x00000000 },
5184 { 0x14c0, 0, 0x00000000, 0x00000009 },
5185 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5186 { 0x14cc, 0, 0x00000000, 0x00000001 },
5187 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5188
5189 { 0x1800, 0, 0x00000000, 0x00000001 },
5190 { 0x1804, 0, 0x00000000, 0x00000003 },
5191
5192 { 0x2800, 0, 0x00000000, 0x00000001 },
5193 { 0x2804, 0, 0x00000000, 0x00003f01 },
5194 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5195 { 0x2810, 0, 0xffff0000, 0x00000000 },
5196 { 0x2814, 0, 0xffff0000, 0x00000000 },
5197 { 0x2818, 0, 0xffff0000, 0x00000000 },
5198 { 0x281c, 0, 0xffff0000, 0x00000000 },
5199 { 0x2834, 0, 0xffffffff, 0x00000000 },
5200 { 0x2840, 0, 0x00000000, 0xffffffff },
5201 { 0x2844, 0, 0x00000000, 0xffffffff },
5202 { 0x2848, 0, 0xffffffff, 0x00000000 },
5203 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5204
5205 { 0x2c00, 0, 0x00000000, 0x00000011 },
5206 { 0x2c04, 0, 0x00000000, 0x00030007 },
5207
5208 { 0x3c00, 0, 0x00000000, 0x00000001 },
5209 { 0x3c04, 0, 0x00000000, 0x00070000 },
5210 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5211 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5212 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5213 { 0x3c14, 0, 0x00000000, 0xffffffff },
5214 { 0x3c18, 0, 0x00000000, 0xffffffff },
5215 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5216 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5217
5218 { 0x5004, 0, 0x00000000, 0x0000007f },
5219 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5220
5221 { 0x5c00, 0, 0x00000000, 0x00000001 },
5222 { 0x5c04, 0, 0x00000000, 0x0003000f },
5223 { 0x5c08, 0, 0x00000003, 0x00000000 },
5224 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5225 { 0x5c10, 0, 0x00000000, 0xffffffff },
5226 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5227 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5228 { 0x5c88, 0, 0x00000000, 0x00077373 },
5229 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5230
5231 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5232 { 0x680c, 0, 0xffffffff, 0x00000000 },
5233 { 0x6810, 0, 0xffffffff, 0x00000000 },
5234 { 0x6814, 0, 0xffffffff, 0x00000000 },
5235 { 0x6818, 0, 0xffffffff, 0x00000000 },
5236 { 0x681c, 0, 0xffffffff, 0x00000000 },
5237 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5238 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5239 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5240 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5241 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5242 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5243 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5244 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5245 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5246 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5247 { 0x684c, 0, 0xffffffff, 0x00000000 },
5248 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5249 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5250 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5251 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5252 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5253 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5254
5255 { 0xffff, 0, 0x00000000, 0x00000000 },
5256 };
5257
5258 ret = 0;
5259 is_5709 = 0;
5260 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5261 is_5709 = 1;
5262
5263 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5264 u32 offset, rw_mask, ro_mask, save_val, val;
5265 u16 flags = reg_tbl[i].flags;
5266
5267 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5268 continue;
5269
5270 offset = (u32) reg_tbl[i].offset;
5271 rw_mask = reg_tbl[i].rw_mask;
5272 ro_mask = reg_tbl[i].ro_mask;
5273
5274 save_val = readl(bp->regview + offset);
5275
5276 writel(0, bp->regview + offset);
5277
5278 val = readl(bp->regview + offset);
5279 if ((val & rw_mask) != 0) {
5280 goto reg_test_err;
5281 }
5282
5283 if ((val & ro_mask) != (save_val & ro_mask)) {
5284 goto reg_test_err;
5285 }
5286
5287 writel(0xffffffff, bp->regview + offset);
5288
5289 val = readl(bp->regview + offset);
5290 if ((val & rw_mask) != rw_mask) {
5291 goto reg_test_err;
5292 }
5293
5294 if ((val & ro_mask) != (save_val & ro_mask)) {
5295 goto reg_test_err;
5296 }
5297
5298 writel(save_val, bp->regview + offset);
5299 continue;
5300
5301 reg_test_err:
5302 writel(save_val, bp->regview + offset);
5303 ret = -ENODEV;
5304 break;
5305 }
5306 return ret;
5307 }
5308
5309 static int
5310 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5311 {
5312 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5313 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5314 int i;
5315
5316 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5317 u32 offset;
5318
5319 for (offset = 0; offset < size; offset += 4) {
5320
5321 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5322
5323 if (bnx2_reg_rd_ind(bp, start + offset) !=
5324 test_pattern[i]) {
5325 return -ENODEV;
5326 }
5327 }
5328 }
5329 return 0;
5330 }
5331
5332 static int
5333 bnx2_test_memory(struct bnx2 *bp)
5334 {
5335 int ret = 0;
5336 int i;
5337 static struct mem_entry {
5338 u32 offset;
5339 u32 len;
5340 } mem_tbl_5706[] = {
5341 { 0x60000, 0x4000 },
5342 { 0xa0000, 0x3000 },
5343 { 0xe0000, 0x4000 },
5344 { 0x120000, 0x4000 },
5345 { 0x1a0000, 0x4000 },
5346 { 0x160000, 0x4000 },
5347 { 0xffffffff, 0 },
5348 },
5349 mem_tbl_5709[] = {
5350 { 0x60000, 0x4000 },
5351 { 0xa0000, 0x3000 },
5352 { 0xe0000, 0x4000 },
5353 { 0x120000, 0x4000 },
5354 { 0x1a0000, 0x4000 },
5355 { 0xffffffff, 0 },
5356 };
5357 struct mem_entry *mem_tbl;
5358
5359 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5360 mem_tbl = mem_tbl_5709;
5361 else
5362 mem_tbl = mem_tbl_5706;
5363
5364 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5365 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5366 mem_tbl[i].len)) != 0) {
5367 return ret;
5368 }
5369 }
5370
5371 return ret;
5372 }
5373
5374 #define BNX2_MAC_LOOPBACK 0
5375 #define BNX2_PHY_LOOPBACK 1
5376
5377 static int
5378 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5379 {
5380 unsigned int pkt_size, num_pkts, i;
5381 struct sk_buff *skb, *rx_skb;
5382 unsigned char *packet;
5383 u16 rx_start_idx, rx_idx;
5384 dma_addr_t map;
5385 struct tx_bd *txbd;
5386 struct sw_bd *rx_buf;
5387 struct l2_fhdr *rx_hdr;
5388 int ret = -ENODEV;
5389 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5390 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5391 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5392
5393 tx_napi = bnapi;
5394
5395 txr = &tx_napi->tx_ring;
5396 rxr = &bnapi->rx_ring;
5397 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5398 bp->loopback = MAC_LOOPBACK;
5399 bnx2_set_mac_loopback(bp);
5400 }
5401 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5402 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5403 return 0;
5404
5405 bp->loopback = PHY_LOOPBACK;
5406 bnx2_set_phy_loopback(bp);
5407 }
5408 else
5409 return -EINVAL;
5410
5411 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5412 skb = netdev_alloc_skb(bp->dev, pkt_size);
5413 if (!skb)
5414 return -ENOMEM;
5415 packet = skb_put(skb, pkt_size);
5416 memcpy(packet, bp->dev->dev_addr, 6);
5417 memset(packet + 6, 0x0, 8);
5418 for (i = 14; i < pkt_size; i++)
5419 packet[i] = (unsigned char) (i & 0xff);
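	/* The test frame is addressed to ourselves: 6 bytes of our own MAC,
	 * 8 zero bytes covering the source MAC and type field, then a
	 * counting byte pattern that is verified on the receive side.
	 */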
5420
5421 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5422 dev_kfree_skb(skb);
5423 return -EIO;
5424 }
5425 map = skb_shinfo(skb)->dma_maps[0];
5426
5427 REG_WR(bp, BNX2_HC_COMMAND,
5428 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5429
5430 REG_RD(bp, BNX2_HC_COMMAND);
5431
5432 udelay(5);
5433 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5434
5435 num_pkts = 0;
5436
5437 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5438
5439 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5440 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5441 txbd->tx_bd_mss_nbytes = pkt_size;
5442 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5443
5444 num_pkts++;
5445 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5446 txr->tx_prod_bseq += pkt_size;
5447
5448 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5449 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5450
5451 udelay(100);
5452
5453 REG_WR(bp, BNX2_HC_COMMAND,
5454 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5455
5456 REG_RD(bp, BNX2_HC_COMMAND);
5457
5458 udelay(5);
5459
5460 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5461 dev_kfree_skb(skb);
5462
5463 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5464 goto loopback_test_done;
5465
5466 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5467 if (rx_idx != rx_start_idx + num_pkts) {
5468 goto loopback_test_done;
5469 }
5470
5471 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5472 rx_skb = rx_buf->skb;
5473
5474 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5475 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5476
5477 pci_dma_sync_single_for_cpu(bp->pdev,
5478 pci_unmap_addr(rx_buf, mapping),
5479 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5480
5481 if (rx_hdr->l2_fhdr_status &
5482 (L2_FHDR_ERRORS_BAD_CRC |
5483 L2_FHDR_ERRORS_PHY_DECODE |
5484 L2_FHDR_ERRORS_ALIGNMENT |
5485 L2_FHDR_ERRORS_TOO_SHORT |
5486 L2_FHDR_ERRORS_GIANT_FRAME)) {
5487
5488 goto loopback_test_done;
5489 }
5490
5491 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5492 goto loopback_test_done;
5493 }
5494
5495 for (i = 14; i < pkt_size; i++) {
5496 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5497 goto loopback_test_done;
5498 }
5499 }
5500
5501 ret = 0;
5502
5503 loopback_test_done:
5504 bp->loopback = 0;
5505 return ret;
5506 }
5507
5508 #define BNX2_MAC_LOOPBACK_FAILED 1
5509 #define BNX2_PHY_LOOPBACK_FAILED 2
5510 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5511 BNX2_PHY_LOOPBACK_FAILED)
5512
5513 static int
5514 bnx2_test_loopback(struct bnx2 *bp)
5515 {
5516 int rc = 0;
5517
5518 if (!netif_running(bp->dev))
5519 return BNX2_LOOPBACK_FAILED;
5520
5521 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5522 spin_lock_bh(&bp->phy_lock);
5523 bnx2_init_phy(bp, 1);
5524 spin_unlock_bh(&bp->phy_lock);
5525 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5526 rc |= BNX2_MAC_LOOPBACK_FAILED;
5527 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5528 rc |= BNX2_PHY_LOOPBACK_FAILED;
5529 return rc;
5530 }
5531
5532 #define NVRAM_SIZE 0x200
5533 #define CRC32_RESIDUAL 0xdebb20e3
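/* When a CRC-32 is computed over a block that already ends with its own
 * (inverted) CRC, the result is this well-known constant residual, so each
 * 0x100-byte NVRAM half below can be validated without locating the stored
 * CRC field explicitly.
 */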
5534
5535 static int
5536 bnx2_test_nvram(struct bnx2 *bp)
5537 {
5538 __be32 buf[NVRAM_SIZE / 4];
5539 u8 *data = (u8 *) buf;
5540 int rc = 0;
5541 u32 magic, csum;
5542
5543 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5544 goto test_nvram_done;
5545
5546 magic = be32_to_cpu(buf[0]);
5547 if (magic != 0x669955aa) {
5548 rc = -ENODEV;
5549 goto test_nvram_done;
5550 }
5551
5552 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5553 goto test_nvram_done;
5554
5555 csum = ether_crc_le(0x100, data);
5556 if (csum != CRC32_RESIDUAL) {
5557 rc = -ENODEV;
5558 goto test_nvram_done;
5559 }
5560
5561 csum = ether_crc_le(0x100, data + 0x100);
5562 if (csum != CRC32_RESIDUAL) {
5563 rc = -ENODEV;
5564 }
5565
5566 test_nvram_done:
5567 return rc;
5568 }
5569
5570 static int
5571 bnx2_test_link(struct bnx2 *bp)
5572 {
5573 u32 bmsr;
5574
5575 if (!netif_running(bp->dev))
5576 return -ENODEV;
5577
5578 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5579 if (bp->link_up)
5580 return 0;
5581 return -ENODEV;
5582 }
5583 spin_lock_bh(&bp->phy_lock);
5584 bnx2_enable_bmsr1(bp);
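	/* BMSR link status is latched-low; read it twice so the second read
	 * reflects the current link state rather than a past drop.
	 */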
5585 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5586 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5587 bnx2_disable_bmsr1(bp);
5588 spin_unlock_bh(&bp->phy_lock);
5589
5590 if (bmsr & BMSR_LSTATUS) {
5591 return 0;
5592 }
5593 return -ENODEV;
5594 }
5595
5596 static int
5597 bnx2_test_intr(struct bnx2 *bp)
5598 {
5599 int i;
5600 u16 status_idx;
5601
5602 if (!netif_running(bp->dev))
5603 return -ENODEV;
5604
5605 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5606
5607 /* This register is not touched during run-time. */
5608 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5609 REG_RD(bp, BNX2_HC_COMMAND);
5610
5611 for (i = 0; i < 10; i++) {
5612 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5613 status_idx) {
5614
5615 break;
5616 }
5617
5618 msleep_interruptible(10);
5619 }
5620 if (i < 10)
5621 return 0;
5622
5623 return -ENODEV;
5624 }
5625
5626 /* Determine link state for parallel detection. */
5627 static int
5628 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5629 {
5630 u32 mode_ctl, an_dbg, exp;
5631
5632 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5633 return 0;
5634
5635 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5636 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5637
5638 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5639 return 0;
5640
5641 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5642 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5643 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5644
5645 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5646 return 0;
5647
5648 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5649 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5650 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5651
5652 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5653 return 0;
5654
5655 return 1;
5656 }
5657
5658 static void
5659 bnx2_5706_serdes_timer(struct bnx2 *bp)
5660 {
5661 int check_link = 1;
5662
5663 spin_lock(&bp->phy_lock);
5664 if (bp->serdes_an_pending) {
5665 bp->serdes_an_pending--;
5666 check_link = 0;
5667 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5668 u32 bmcr;
5669
5670 bp->current_interval = BNX2_TIMER_INTERVAL;
5671
5672 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5673
5674 if (bmcr & BMCR_ANENABLE) {
5675 if (bnx2_5706_serdes_has_link(bp)) {
5676 bmcr &= ~BMCR_ANENABLE;
5677 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5678 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5679 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5680 }
5681 }
5682 }
5683 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5684 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5685 u32 phy2;
5686
5687 bnx2_write_phy(bp, 0x17, 0x0f01);
5688 bnx2_read_phy(bp, 0x15, &phy2);
5689 if (phy2 & 0x20) {
5690 u32 bmcr;
5691
5692 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5693 bmcr |= BMCR_ANENABLE;
5694 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5695
5696 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5697 }
5698 } else
5699 bp->current_interval = BNX2_TIMER_INTERVAL;
5700
5701 if (check_link) {
5702 u32 val;
5703
5704 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5705 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5706 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5707
5708 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5709 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5710 bnx2_5706s_force_link_dn(bp, 1);
5711 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5712 } else
5713 bnx2_set_link(bp);
5714 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5715 bnx2_set_link(bp);
5716 }
5717 spin_unlock(&bp->phy_lock);
5718 }
5719
5720 static void
5721 bnx2_5708_serdes_timer(struct bnx2 *bp)
5722 {
5723 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5724 return;
5725
5726 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5727 bp->serdes_an_pending = 0;
5728 return;
5729 }
5730
5731 spin_lock(&bp->phy_lock);
5732 if (bp->serdes_an_pending)
5733 bp->serdes_an_pending--;
5734 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5735 u32 bmcr;
5736
5737 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5738 if (bmcr & BMCR_ANENABLE) {
5739 bnx2_enable_forced_2g5(bp);
5740 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5741 } else {
5742 bnx2_disable_forced_2g5(bp);
5743 bp->serdes_an_pending = 2;
5744 bp->current_interval = BNX2_TIMER_INTERVAL;
5745 }
5746
5747 } else
5748 bp->current_interval = BNX2_TIMER_INTERVAL;
5749
5750 spin_unlock(&bp->phy_lock);
5751 }
5752
5753 static void
5754 bnx2_timer(unsigned long data)
5755 {
5756 struct bnx2 *bp = (struct bnx2 *) data;
5757
5758 if (!netif_running(bp->dev))
5759 return;
5760
5761 if (atomic_read(&bp->intr_sem) != 0)
5762 goto bnx2_restart_timer;
5763
5764 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5765 BNX2_FLAG_USING_MSI)
5766 bnx2_chk_missed_msi(bp);
5767
5768 bnx2_send_heart_beat(bp);
5769
5770 bp->stats_blk->stat_FwRxDrop =
5771 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5772
5773 	/* work around occasionally corrupted counters */
5774 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5775 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5776 BNX2_HC_COMMAND_STATS_NOW);
5777
5778 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5779 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5780 bnx2_5706_serdes_timer(bp);
5781 else
5782 bnx2_5708_serdes_timer(bp);
5783 }
5784
5785 bnx2_restart_timer:
5786 mod_timer(&bp->timer, jiffies + bp->current_interval);
5787 }
5788
5789 static int
5790 bnx2_request_irq(struct bnx2 *bp)
5791 {
5792 unsigned long flags;
5793 struct bnx2_irq *irq;
5794 int rc = 0, i;
5795
5796 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5797 flags = 0;
5798 else
5799 flags = IRQF_SHARED;
5800
5801 for (i = 0; i < bp->irq_nvecs; i++) {
5802 irq = &bp->irq_tbl[i];
5803 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5804 &bp->bnx2_napi[i]);
5805 if (rc)
5806 break;
5807 irq->requested = 1;
5808 }
5809 return rc;
5810 }
5811
5812 static void
5813 bnx2_free_irq(struct bnx2 *bp)
5814 {
5815 struct bnx2_irq *irq;
5816 int i;
5817
5818 for (i = 0; i < bp->irq_nvecs; i++) {
5819 irq = &bp->irq_tbl[i];
5820 if (irq->requested)
5821 free_irq(irq->vector, &bp->bnx2_napi[i]);
5822 irq->requested = 0;
5823 }
5824 if (bp->flags & BNX2_FLAG_USING_MSI)
5825 pci_disable_msi(bp->pdev);
5826 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5827 pci_disable_msix(bp->pdev);
5828
5829 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5830 }
5831
5832 static void
5833 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5834 {
5835 int i, rc;
5836 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5837 struct net_device *dev = bp->dev;
5838 const int len = sizeof(bp->irq_tbl[0].name);
5839
5840 bnx2_setup_msix_tbl(bp);
5841 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5842 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5843 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5844
5845 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5846 msix_ent[i].entry = i;
5847 msix_ent[i].vector = 0;
5848
5849 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5850 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5851 }
5852
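	/* pci_enable_msix() of this era is all-or-nothing: any non-zero
	 * return (an error or a smaller available vector count) makes us
	 * bail out here and fall back to MSI or INTx in the caller.
	 */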
5853 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5854 if (rc != 0)
5855 return;
5856
5857 bp->irq_nvecs = msix_vecs;
5858 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5859 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5860 bp->irq_tbl[i].vector = msix_ent[i].vector;
5861 }
5862
5863 static void
5864 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5865 {
5866 int cpus = num_online_cpus();
5867 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5868
5869 bp->irq_tbl[0].handler = bnx2_interrupt;
5870 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5871 bp->irq_nvecs = 1;
5872 bp->irq_tbl[0].vector = bp->pdev->irq;
5873
5874 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5875 bnx2_enable_msix(bp, msix_vecs);
5876
5877 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5878 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5879 if (pci_enable_msi(bp->pdev) == 0) {
5880 bp->flags |= BNX2_FLAG_USING_MSI;
5881 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5882 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5883 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5884 } else
5885 bp->irq_tbl[0].handler = bnx2_msi;
5886
5887 bp->irq_tbl[0].vector = bp->pdev->irq;
5888 }
5889 }
5890
5891 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5892 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5893
5894 bp->num_rx_rings = bp->irq_nvecs;
5895 }
5896
5897 /* Called with rtnl_lock */
5898 static int
5899 bnx2_open(struct net_device *dev)
5900 {
5901 struct bnx2 *bp = netdev_priv(dev);
5902 int rc;
5903
5904 netif_carrier_off(dev);
5905
5906 bnx2_set_power_state(bp, PCI_D0);
5907 bnx2_disable_int(bp);
5908
5909 bnx2_setup_int_mode(bp, disable_msi);
5910 bnx2_napi_enable(bp);
5911 rc = bnx2_alloc_mem(bp);
5912 if (rc)
5913 goto open_err;
5914
5915 rc = bnx2_request_irq(bp);
5916 if (rc)
5917 goto open_err;
5918
5919 rc = bnx2_init_nic(bp, 1);
5920 if (rc)
5921 goto open_err;
5922
5923 mod_timer(&bp->timer, jiffies + bp->current_interval);
5924
5925 atomic_set(&bp->intr_sem, 0);
5926
5927 bnx2_enable_int(bp);
5928
5929 if (bp->flags & BNX2_FLAG_USING_MSI) {
5930 		/* Test MSI to make sure it is working.
5931 		 * If the MSI test fails, go back to INTx mode.
5932 */
5933 if (bnx2_test_intr(bp) != 0) {
5934 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5935 " using MSI, switching to INTx mode. Please"
5936 " report this failure to the PCI maintainer"
5937 " and include system chipset information.\n",
5938 bp->dev->name);
5939
5940 bnx2_disable_int(bp);
5941 bnx2_free_irq(bp);
5942
5943 bnx2_setup_int_mode(bp, 1);
5944
5945 rc = bnx2_init_nic(bp, 0);
5946
5947 if (!rc)
5948 rc = bnx2_request_irq(bp);
5949
5950 if (rc) {
5951 del_timer_sync(&bp->timer);
5952 goto open_err;
5953 }
5954 bnx2_enable_int(bp);
5955 }
5956 }
5957 if (bp->flags & BNX2_FLAG_USING_MSI)
5958 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5959 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5960 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5961
5962 netif_tx_start_all_queues(dev);
5963
5964 return 0;
5965
5966 open_err:
5967 bnx2_napi_disable(bp);
5968 bnx2_free_skbs(bp);
5969 bnx2_free_irq(bp);
5970 bnx2_free_mem(bp);
5971 return rc;
5972 }
5973
5974 static void
5975 bnx2_reset_task(struct work_struct *work)
5976 {
5977 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5978
5979 if (!netif_running(bp->dev))
5980 return;
5981
5982 bnx2_netif_stop(bp);
5983
5984 bnx2_init_nic(bp, 1);
5985
5986 atomic_set(&bp->intr_sem, 1);
5987 bnx2_netif_start(bp);
5988 }
5989
5990 static void
5991 bnx2_tx_timeout(struct net_device *dev)
5992 {
5993 struct bnx2 *bp = netdev_priv(dev);
5994
5995 	/* This allows the netif to be shut down gracefully before resetting */
5996 schedule_work(&bp->reset_task);
5997 }
5998
5999 #ifdef BCM_VLAN
6000 /* Called with rtnl_lock */
6001 static void
6002 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6003 {
6004 struct bnx2 *bp = netdev_priv(dev);
6005
6006 bnx2_netif_stop(bp);
6007
6008 bp->vlgrp = vlgrp;
6009 bnx2_set_rx_mode(dev);
6010 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6011 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6012
6013 bnx2_netif_start(bp);
6014 }
6015 #endif
6016
6017 /* Called with netif_tx_lock.
6018 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6019 * netif_wake_queue().
6020 */
6021 static int
6022 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6023 {
6024 struct bnx2 *bp = netdev_priv(dev);
6025 dma_addr_t mapping;
6026 struct tx_bd *txbd;
6027 struct sw_tx_bd *tx_buf;
6028 u32 len, vlan_tag_flags, last_frag, mss;
6029 u16 prod, ring_prod;
6030 int i;
6031 struct bnx2_napi *bnapi;
6032 struct bnx2_tx_ring_info *txr;
6033 struct netdev_queue *txq;
6034 struct skb_shared_info *sp;
6035
6036 /* Determine which tx ring we will be placed on */
6037 i = skb_get_queue_mapping(skb);
6038 bnapi = &bp->bnx2_napi[i];
6039 txr = &bnapi->tx_ring;
6040 txq = netdev_get_tx_queue(dev, i);
6041
6042 if (unlikely(bnx2_tx_avail(bp, txr) <
6043 (skb_shinfo(skb)->nr_frags + 1))) {
6044 netif_tx_stop_queue(txq);
6045 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6046 dev->name);
6047
6048 return NETDEV_TX_BUSY;
6049 }
6050 len = skb_headlen(skb);
6051 prod = txr->tx_prod;
6052 ring_prod = TX_RING_IDX(prod);
6053
6054 vlan_tag_flags = 0;
6055 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6056 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6057 }
6058
6059 #ifdef BCM_VLAN
6060 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6061 vlan_tag_flags |=
6062 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6063 }
6064 #endif
6065 if ((mss = skb_shinfo(skb)->gso_size)) {
6066 u32 tcp_opt_len;
6067 struct iphdr *iph;
6068
6069 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6070
6071 tcp_opt_len = tcp_optlen(skb);
6072
6073 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6074 u32 tcp_off = skb_transport_offset(skb) -
6075 sizeof(struct ipv6hdr) - ETH_HLEN;
6076
6077 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6078 TX_BD_FLAGS_SW_FLAGS;
6079 if (likely(tcp_off == 0))
6080 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6081 else {
6082 tcp_off >>= 3;
6083 vlan_tag_flags |= ((tcp_off & 0x3) <<
6084 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6085 ((tcp_off & 0x10) <<
6086 TX_BD_FLAGS_TCP6_OFF4_SHL);
6087 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6088 }
6089 } else {
6090 iph = ip_hdr(skb);
6091 if (tcp_opt_len || (iph->ihl > 5)) {
6092 vlan_tag_flags |= ((iph->ihl - 5) +
6093 (tcp_opt_len >> 2)) << 8;
6094 }
6095 }
6096 } else
6097 mss = 0;
6098
6099 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6100 dev_kfree_skb(skb);
6101 return NETDEV_TX_OK;
6102 }
6103
6104 sp = skb_shinfo(skb);
6105 mapping = sp->dma_maps[0];
6106
6107 tx_buf = &txr->tx_buf_ring[ring_prod];
6108 tx_buf->skb = skb;
6109
6110 txbd = &txr->tx_desc_ring[ring_prod];
6111
6112 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6113 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6114 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6115 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6116
6117 last_frag = skb_shinfo(skb)->nr_frags;
6118
6119 for (i = 0; i < last_frag; i++) {
6120 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6121
6122 prod = NEXT_TX_BD(prod);
6123 ring_prod = TX_RING_IDX(prod);
6124 txbd = &txr->tx_desc_ring[ring_prod];
6125
6126 len = frag->size;
6127 mapping = sp->dma_maps[i + 1];
6128
6129 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6130 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6131 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6132 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6133
6134 }
6135 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6136
6137 prod = NEXT_TX_BD(prod);
6138 txr->tx_prod_bseq += skb->len;
6139
6140 REG_WR16(bp, txr->tx_bidx_addr, prod);
6141 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6142
6143 mmiowb();
6144
6145 txr->tx_prod = prod;
6146 dev->trans_start = jiffies;
6147
6148 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6149 netif_tx_stop_queue(txq);
6150 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6151 netif_tx_wake_queue(txq);
6152 }
6153
6154 return NETDEV_TX_OK;
6155 }
6156
6157 /* Called with rtnl_lock */
6158 static int
6159 bnx2_close(struct net_device *dev)
6160 {
6161 struct bnx2 *bp = netdev_priv(dev);
6162
6163 cancel_work_sync(&bp->reset_task);
6164
6165 bnx2_disable_int_sync(bp);
6166 bnx2_napi_disable(bp);
6167 del_timer_sync(&bp->timer);
6168 bnx2_shutdown_chip(bp);
6169 bnx2_free_irq(bp);
6170 bnx2_free_skbs(bp);
6171 bnx2_free_mem(bp);
6172 bp->link_up = 0;
6173 netif_carrier_off(bp->dev);
6174 bnx2_set_power_state(bp, PCI_D3hot);
6175 return 0;
6176 }
6177
6178 #define GET_NET_STATS64(ctr) \
6179 ((unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6180 (unsigned long) (ctr##_lo))
6181
6182 #define GET_NET_STATS32(ctr) \
6183 (ctr##_lo)
6184
6185 #if (BITS_PER_LONG == 64)
6186 #define GET_NET_STATS GET_NET_STATS64
6187 #else
6188 #define GET_NET_STATS GET_NET_STATS32
6189 #endif
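/* Worked example: with stat_IfHCInOctets_hi = 0x1 and
 * stat_IfHCInOctets_lo = 0x2, GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 * yields 0x100000002 on a 64-bit host, but only 0x2 on a 32-bit host,
 * where the high word is dropped to keep the result in unsigned long.
 */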
6190
6191 static struct net_device_stats *
6192 bnx2_get_stats(struct net_device *dev)
6193 {
6194 struct bnx2 *bp = netdev_priv(dev);
6195 struct statistics_block *stats_blk = bp->stats_blk;
6196 struct net_device_stats *net_stats = &dev->stats;
6197
6198 if (bp->stats_blk == NULL) {
6199 return net_stats;
6200 }
6201 net_stats->rx_packets =
6202 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6203 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6204 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6205
6206 net_stats->tx_packets =
6207 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6208 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6209 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6210
6211 net_stats->rx_bytes =
6212 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6213
6214 net_stats->tx_bytes =
6215 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6216
6217 net_stats->multicast =
6218 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6219
6220 net_stats->collisions =
6221 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6222
6223 net_stats->rx_length_errors =
6224 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6225 stats_blk->stat_EtherStatsOverrsizePkts);
6226
6227 net_stats->rx_over_errors =
6228 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6229
6230 net_stats->rx_frame_errors =
6231 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6232
6233 net_stats->rx_crc_errors =
6234 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6235
6236 net_stats->rx_errors = net_stats->rx_length_errors +
6237 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6238 net_stats->rx_crc_errors;
6239
6240 net_stats->tx_aborted_errors =
6241 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6242 stats_blk->stat_Dot3StatsLateCollisions);
6243
6244 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6245 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6246 net_stats->tx_carrier_errors = 0;
6247 else {
6248 net_stats->tx_carrier_errors =
6249 (unsigned long)
6250 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6251 }
6252
6253 net_stats->tx_errors =
6254 (unsigned long)
6255 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6256 +
6257 net_stats->tx_aborted_errors +
6258 net_stats->tx_carrier_errors;
6259
6260 net_stats->rx_missed_errors =
6261 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6262 stats_blk->stat_FwRxDrop);
6263
6264 return net_stats;
6265 }
6266
6267 /* All ethtool functions called with rtnl_lock */
6268
6269 static int
6270 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6271 {
6272 struct bnx2 *bp = netdev_priv(dev);
6273 int support_serdes = 0, support_copper = 0;
6274
6275 cmd->supported = SUPPORTED_Autoneg;
6276 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6277 support_serdes = 1;
6278 support_copper = 1;
6279 } else if (bp->phy_port == PORT_FIBRE)
6280 support_serdes = 1;
6281 else
6282 support_copper = 1;
6283
6284 if (support_serdes) {
6285 cmd->supported |= SUPPORTED_1000baseT_Full |
6286 SUPPORTED_FIBRE;
6287 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6288 cmd->supported |= SUPPORTED_2500baseX_Full;
6289
6290 }
6291 if (support_copper) {
6292 cmd->supported |= SUPPORTED_10baseT_Half |
6293 SUPPORTED_10baseT_Full |
6294 SUPPORTED_100baseT_Half |
6295 SUPPORTED_100baseT_Full |
6296 SUPPORTED_1000baseT_Full |
6297 SUPPORTED_TP;
6298
6299 }
6300
6301 spin_lock_bh(&bp->phy_lock);
6302 cmd->port = bp->phy_port;
6303 cmd->advertising = bp->advertising;
6304
6305 if (bp->autoneg & AUTONEG_SPEED) {
6306 cmd->autoneg = AUTONEG_ENABLE;
6307 }
6308 else {
6309 cmd->autoneg = AUTONEG_DISABLE;
6310 }
6311
6312 if (netif_carrier_ok(dev)) {
6313 cmd->speed = bp->line_speed;
6314 cmd->duplex = bp->duplex;
6315 }
6316 else {
6317 cmd->speed = -1;
6318 cmd->duplex = -1;
6319 }
6320 spin_unlock_bh(&bp->phy_lock);
6321
6322 cmd->transceiver = XCVR_INTERNAL;
6323 cmd->phy_address = bp->phy_addr;
6324
6325 return 0;
6326 }
6327
6328 static int
6329 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6330 {
6331 struct bnx2 *bp = netdev_priv(dev);
6332 u8 autoneg = bp->autoneg;
6333 u8 req_duplex = bp->req_duplex;
6334 u16 req_line_speed = bp->req_line_speed;
6335 u32 advertising = bp->advertising;
6336 int err = -EINVAL;
6337
6338 spin_lock_bh(&bp->phy_lock);
6339
6340 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6341 goto err_out_unlock;
6342
6343 if (cmd->port != bp->phy_port &&
6344 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6345 goto err_out_unlock;
6346
6347 /* If device is down, we can store the settings only if the user
6348 * is setting the currently active port.
6349 */
6350 if (!netif_running(dev) && cmd->port != bp->phy_port)
6351 goto err_out_unlock;
6352
6353 if (cmd->autoneg == AUTONEG_ENABLE) {
6354 autoneg |= AUTONEG_SPEED;
6355
6356 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6357
6358 /* allow advertising a single speed */
6359 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6360 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6361 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6362 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6363
6364 if (cmd->port == PORT_FIBRE)
6365 goto err_out_unlock;
6366
6367 advertising = cmd->advertising;
6368
6369 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6370 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6371 (cmd->port == PORT_TP))
6372 goto err_out_unlock;
6373 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6374 advertising = cmd->advertising;
6375 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6376 goto err_out_unlock;
6377 else {
6378 if (cmd->port == PORT_FIBRE)
6379 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6380 else
6381 advertising = ETHTOOL_ALL_COPPER_SPEED;
6382 }
6383 advertising |= ADVERTISED_Autoneg;
6384 }
6385 else {
6386 if (cmd->port == PORT_FIBRE) {
6387 if ((cmd->speed != SPEED_1000 &&
6388 cmd->speed != SPEED_2500) ||
6389 (cmd->duplex != DUPLEX_FULL))
6390 goto err_out_unlock;
6391
6392 if (cmd->speed == SPEED_2500 &&
6393 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6394 goto err_out_unlock;
6395 }
6396 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6397 goto err_out_unlock;
6398
6399 autoneg &= ~AUTONEG_SPEED;
6400 req_line_speed = cmd->speed;
6401 req_duplex = cmd->duplex;
6402 advertising = 0;
6403 }
6404
6405 bp->autoneg = autoneg;
6406 bp->advertising = advertising;
6407 bp->req_line_speed = req_line_speed;
6408 bp->req_duplex = req_duplex;
6409
6410 err = 0;
6411 /* If device is down, the new settings will be picked up when it is
6412 * brought up.
6413 */
6414 if (netif_running(dev))
6415 err = bnx2_setup_phy(bp, cmd->port);
6416
6417 err_out_unlock:
6418 spin_unlock_bh(&bp->phy_lock);
6419
6420 return err;
6421 }
6422
6423 static void
6424 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6425 {
6426 struct bnx2 *bp = netdev_priv(dev);
6427
6428 strcpy(info->driver, DRV_MODULE_NAME);
6429 strcpy(info->version, DRV_MODULE_VERSION);
6430 strcpy(info->bus_info, pci_name(bp->pdev));
6431 strcpy(info->fw_version, bp->fw_version);
6432 }
6433
6434 #define BNX2_REGDUMP_LEN (32 * 1024)
6435
6436 static int
6437 bnx2_get_regs_len(struct net_device *dev)
6438 {
6439 return BNX2_REGDUMP_LEN;
6440 }
6441
6442 static void
6443 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6444 {
6445 u32 *p = _p, i, offset;
6446 u8 *orig_p = _p;
6447 struct bnx2 *bp = netdev_priv(dev);
6448 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6449 0x0800, 0x0880, 0x0c00, 0x0c10,
6450 0x0c30, 0x0d08, 0x1000, 0x101c,
6451 0x1040, 0x1048, 0x1080, 0x10a4,
6452 0x1400, 0x1490, 0x1498, 0x14f0,
6453 0x1500, 0x155c, 0x1580, 0x15dc,
6454 0x1600, 0x1658, 0x1680, 0x16d8,
6455 0x1800, 0x1820, 0x1840, 0x1854,
6456 0x1880, 0x1894, 0x1900, 0x1984,
6457 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6458 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6459 0x2000, 0x2030, 0x23c0, 0x2400,
6460 0x2800, 0x2820, 0x2830, 0x2850,
6461 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6462 0x3c00, 0x3c94, 0x4000, 0x4010,
6463 0x4080, 0x4090, 0x43c0, 0x4458,
6464 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6465 0x4fc0, 0x5010, 0x53c0, 0x5444,
6466 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6467 0x5fc0, 0x6000, 0x6400, 0x6428,
6468 0x6800, 0x6848, 0x684c, 0x6860,
6469 0x6888, 0x6910, 0x8000 };
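/* reg_boundaries holds pairs of [start, end) readable register ranges;
 * the copy loop below reads each range and then jumps to the next
 * start, leaving the unreadable holes zero-filled in the dump buffer.
 */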
6470
6471 regs->version = 0;
6472
6473 memset(p, 0, BNX2_REGDUMP_LEN);
6474
6475 if (!netif_running(bp->dev))
6476 return;
6477
6478 i = 0;
6479 offset = reg_boundaries[0];
6480 p += offset;
6481 while (offset < BNX2_REGDUMP_LEN) {
6482 *p++ = REG_RD(bp, offset);
6483 offset += 4;
6484 if (offset == reg_boundaries[i + 1]) {
6485 offset = reg_boundaries[i + 2];
6486 p = (u32 *) (orig_p + offset);
6487 i += 2;
6488 }
6489 }
6490 }
6491
6492 static void
6493 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6494 {
6495 struct bnx2 *bp = netdev_priv(dev);
6496
6497 if (bp->flags & BNX2_FLAG_NO_WOL) {
6498 wol->supported = 0;
6499 wol->wolopts = 0;
6500 }
6501 else {
6502 wol->supported = WAKE_MAGIC;
6503 if (bp->wol)
6504 wol->wolopts = WAKE_MAGIC;
6505 else
6506 wol->wolopts = 0;
6507 }
6508 memset(&wol->sopass, 0, sizeof(wol->sopass));
6509 }
6510
6511 static int
6512 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6513 {
6514 struct bnx2 *bp = netdev_priv(dev);
6515
6516 if (wol->wolopts & ~WAKE_MAGIC)
6517 return -EINVAL;
6518
6519 if (wol->wolopts & WAKE_MAGIC) {
6520 if (bp->flags & BNX2_FLAG_NO_WOL)
6521 return -EINVAL;
6522
6523 bp->wol = 1;
6524 }
6525 else {
6526 bp->wol = 0;
6527 }
6528 return 0;
6529 }
6530
6531 static int
6532 bnx2_nway_reset(struct net_device *dev)
6533 {
6534 struct bnx2 *bp = netdev_priv(dev);
6535 u32 bmcr;
6536
6537 if (!netif_running(dev))
6538 return -EAGAIN;
6539
6540 if (!(bp->autoneg & AUTONEG_SPEED)) {
6541 return -EINVAL;
6542 }
6543
6544 spin_lock_bh(&bp->phy_lock);
6545
6546 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6547 int rc;
6548
6549 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6550 spin_unlock_bh(&bp->phy_lock);
6551 return rc;
6552 }
6553
6554 /* Force a link down visible on the other side */
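/* Putting the PHY in loopback drops the link; the 20 ms sleep gives the
 * peer time to notice before autonegotiation is restarted below.
 */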
6555 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6556 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6557 spin_unlock_bh(&bp->phy_lock);
6558
6559 msleep(20);
6560
6561 spin_lock_bh(&bp->phy_lock);
6562
6563 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6564 bp->serdes_an_pending = 1;
6565 mod_timer(&bp->timer, jiffies + bp->current_interval);
6566 }
6567
6568 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6569 bmcr &= ~BMCR_LOOPBACK;
6570 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6571
6572 spin_unlock_bh(&bp->phy_lock);
6573
6574 return 0;
6575 }
6576
6577 static int
6578 bnx2_get_eeprom_len(struct net_device *dev)
6579 {
6580 struct bnx2 *bp = netdev_priv(dev);
6581
6582 if (bp->flash_info == NULL)
6583 return 0;
6584
6585 return (int) bp->flash_size;
6586 }
6587
6588 static int
6589 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6590 u8 *eebuf)
6591 {
6592 struct bnx2 *bp = netdev_priv(dev);
6593 int rc;
6594
6595 if (!netif_running(dev))
6596 return -EAGAIN;
6597
6598 /* parameters already validated in ethtool_get_eeprom */
6599
6600 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6601
6602 return rc;
6603 }
6604
6605 static int
6606 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6607 u8 *eebuf)
6608 {
6609 struct bnx2 *bp = netdev_priv(dev);
6610 int rc;
6611
6612 if (!netif_running(dev))
6613 return -EAGAIN;
6614
6615 /* parameters already validated in ethtool_set_eeprom */
6616
6617 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6618
6619 return rc;
6620 }
6621
6622 static int
6623 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6624 {
6625 struct bnx2 *bp = netdev_priv(dev);
6626
6627 memset(coal, 0, sizeof(struct ethtool_coalesce));
6628
6629 coal->rx_coalesce_usecs = bp->rx_ticks;
6630 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6631 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6632 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6633
6634 coal->tx_coalesce_usecs = bp->tx_ticks;
6635 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6636 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6637 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6638
6639 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6640
6641 return 0;
6642 }
6643
6644 static int
6645 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6646 {
6647 struct bnx2 *bp = netdev_priv(dev);
6648
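/* Clamp to the hardware field widths (presumably 10-bit tick counters
 * and 8-bit frame-trip counters, judging by the 0x3ff and 0xff limits
 * applied below).
 */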
6649 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6650 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6651
6652 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6653 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6654
6655 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6656 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6657
6658 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6659 if (bp->rx_quick_cons_trip_int > 0xff)
6660 bp->rx_quick_cons_trip_int = 0xff;
6661
6662 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6663 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6664
6665 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6666 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6667
6668 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6669 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6670
6671 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6672 if (bp->tx_quick_cons_trip_int > 0xff)
6673 bp->tx_quick_cons_trip_int = 0xff;
6674
6675 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6676 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6677 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6678 bp->stats_ticks = USEC_PER_SEC;
6679 }
6680 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6681 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6682 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6683
6684 if (netif_running(bp->dev)) {
6685 bnx2_netif_stop(bp);
6686 bnx2_init_nic(bp, 0);
6687 bnx2_netif_start(bp);
6688 }
6689
6690 return 0;
6691 }
6692
6693 static void
6694 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6695 {
6696 struct bnx2 *bp = netdev_priv(dev);
6697
6698 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6699 ering->rx_mini_max_pending = 0;
6700 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6701
6702 ering->rx_pending = bp->rx_ring_size;
6703 ering->rx_mini_pending = 0;
6704 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6705
6706 ering->tx_max_pending = MAX_TX_DESC_CNT;
6707 ering->tx_pending = bp->tx_ring_size;
6708 }
6709
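/* Resizing the rings requires a full reinitialization: quiesce the NIC,
 * reset the chip, free all buffers and ring memory, then reallocate and
 * restart with the new sizes.  Traffic is interrupted while this runs.
 */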
6710 static int
6711 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6712 {
6713 if (netif_running(bp->dev)) {
6714 bnx2_netif_stop(bp);
6715 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6716 bnx2_free_skbs(bp);
6717 bnx2_free_mem(bp);
6718 }
6719
6720 bnx2_set_rx_ring_size(bp, rx);
6721 bp->tx_ring_size = tx;
6722
6723 if (netif_running(bp->dev)) {
6724 int rc;
6725
6726 rc = bnx2_alloc_mem(bp);
6727 if (rc)
6728 return rc;
6729 bnx2_init_nic(bp, 0);
6730 bnx2_netif_start(bp);
6731 }
6732 return 0;
6733 }
6734
6735 static int
6736 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6737 {
6738 struct bnx2 *bp = netdev_priv(dev);
6739 int rc;
6740
6741 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6742 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6743 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6744
6745 return -EINVAL;
6746 }
6747 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6748 return rc;
6749 }
6750
6751 static void
6752 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6753 {
6754 struct bnx2 *bp = netdev_priv(dev);
6755
6756 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6757 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6758 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6759 }
6760
6761 static int
6762 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6763 {
6764 struct bnx2 *bp = netdev_priv(dev);
6765
6766 bp->req_flow_ctrl = 0;
6767 if (epause->rx_pause)
6768 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6769 if (epause->tx_pause)
6770 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6771
6772 if (epause->autoneg) {
6773 bp->autoneg |= AUTONEG_FLOW_CTRL;
6774 }
6775 else {
6776 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6777 }
6778
6779 if (netif_running(dev)) {
6780 spin_lock_bh(&bp->phy_lock);
6781 bnx2_setup_phy(bp, bp->phy_port);
6782 spin_unlock_bh(&bp->phy_lock);
6783 }
6784
6785 return 0;
6786 }
6787
6788 static u32
6789 bnx2_get_rx_csum(struct net_device *dev)
6790 {
6791 struct bnx2 *bp = netdev_priv(dev);
6792
6793 return bp->rx_csum;
6794 }
6795
6796 static int
6797 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6798 {
6799 struct bnx2 *bp = netdev_priv(dev);
6800
6801 bp->rx_csum = data;
6802 return 0;
6803 }
6804
6805 static int
6806 bnx2_set_tso(struct net_device *dev, u32 data)
6807 {
6808 struct bnx2 *bp = netdev_priv(dev);
6809
6810 if (data) {
6811 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6812 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6813 dev->features |= NETIF_F_TSO6;
6814 } else
6815 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6816 NETIF_F_TSO_ECN);
6817 return 0;
6818 }
6819
6820 #define BNX2_NUM_STATS 46
6821
6822 static struct {
6823 char string[ETH_GSTRING_LEN];
6824 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6825 { "rx_bytes" },
6826 { "rx_error_bytes" },
6827 { "tx_bytes" },
6828 { "tx_error_bytes" },
6829 { "rx_ucast_packets" },
6830 { "rx_mcast_packets" },
6831 { "rx_bcast_packets" },
6832 { "tx_ucast_packets" },
6833 { "tx_mcast_packets" },
6834 { "tx_bcast_packets" },
6835 { "tx_mac_errors" },
6836 { "tx_carrier_errors" },
6837 { "rx_crc_errors" },
6838 { "rx_align_errors" },
6839 { "tx_single_collisions" },
6840 { "tx_multi_collisions" },
6841 { "tx_deferred" },
6842 { "tx_excess_collisions" },
6843 { "tx_late_collisions" },
6844 { "tx_total_collisions" },
6845 { "rx_fragments" },
6846 { "rx_jabbers" },
6847 { "rx_undersize_packets" },
6848 { "rx_oversize_packets" },
6849 { "rx_64_byte_packets" },
6850 { "rx_65_to_127_byte_packets" },
6851 { "rx_128_to_255_byte_packets" },
6852 { "rx_256_to_511_byte_packets" },
6853 { "rx_512_to_1023_byte_packets" },
6854 { "rx_1024_to_1522_byte_packets" },
6855 { "rx_1523_to_9022_byte_packets" },
6856 { "tx_64_byte_packets" },
6857 { "tx_65_to_127_byte_packets" },
6858 { "tx_128_to_255_byte_packets" },
6859 { "tx_256_to_511_byte_packets" },
6860 { "tx_512_to_1023_byte_packets" },
6861 { "tx_1024_to_1522_byte_packets" },
6862 { "tx_1523_to_9022_byte_packets" },
6863 { "rx_xon_frames" },
6864 { "rx_xoff_frames" },
6865 { "tx_xon_frames" },
6866 { "tx_xoff_frames" },
6867 { "rx_mac_ctrl_frames" },
6868 { "rx_filtered_packets" },
6869 { "rx_discards" },
6870 { "rx_fw_discards" },
6871 };
6872
6873 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6874
6875 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6876 STATS_OFFSET32(stat_IfHCInOctets_hi),
6877 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6878 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6879 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6880 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6881 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6882 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6883 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6884 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6885 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6886 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6887 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6888 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6889 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6890 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6891 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6892 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6893 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6894 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6895 STATS_OFFSET32(stat_EtherStatsCollisions),
6896 STATS_OFFSET32(stat_EtherStatsFragments),
6897 STATS_OFFSET32(stat_EtherStatsJabbers),
6898 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6899 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6900 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6901 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6902 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6903 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6904 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6905 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6906 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6907 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6908 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6909 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6910 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6911 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6912 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6913 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6914 STATS_OFFSET32(stat_XonPauseFramesReceived),
6915 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6916 STATS_OFFSET32(stat_OutXonSent),
6917 STATS_OFFSET32(stat_OutXoffSent),
6918 STATS_OFFSET32(stat_MacControlFramesReceived),
6919 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6920 STATS_OFFSET32(stat_IfInMBUFDiscards),
6921 STATS_OFFSET32(stat_FwRxDrop),
6922 };
6923
6924 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6925 * skipped because of errata.
6926 */
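/* Each entry below is the width in bytes of the corresponding counter in
 * bnx2_stats_str_arr: 8 = 64-bit hi/lo pair, 4 = 32-bit, 0 = skipped
 * (reported as zero).
 */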
6927 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6928 8,0,8,8,8,8,8,8,8,8,
6929 4,0,4,4,4,4,4,4,4,4,
6930 4,4,4,4,4,4,4,4,4,4,
6931 4,4,4,4,4,4,4,4,4,4,
6932 4,4,4,4,4,4,
6933 };
6934
6935 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6936 8,0,8,8,8,8,8,8,8,8,
6937 4,4,4,4,4,4,4,4,4,4,
6938 4,4,4,4,4,4,4,4,4,4,
6939 4,4,4,4,4,4,4,4,4,4,
6940 4,4,4,4,4,4,
6941 };
6942
6943 #define BNX2_NUM_TESTS 6
6944
6945 static struct {
6946 char string[ETH_GSTRING_LEN];
6947 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6948 { "register_test (offline)" },
6949 { "memory_test (offline)" },
6950 { "loopback_test (offline)" },
6951 { "nvram_test (online)" },
6952 { "interrupt_test (online)" },
6953 { "link_test (online)" },
6954 };
6955
6956 static int
6957 bnx2_get_sset_count(struct net_device *dev, int sset)
6958 {
6959 switch (sset) {
6960 case ETH_SS_TEST:
6961 return BNX2_NUM_TESTS;
6962 case ETH_SS_STATS:
6963 return BNX2_NUM_STATS;
6964 default:
6965 return -EOPNOTSUPP;
6966 }
6967 }
6968
6969 static void
6970 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6971 {
6972 struct bnx2 *bp = netdev_priv(dev);
6973
6974 bnx2_set_power_state(bp, PCI_D0);
6975
6976 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6977 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6978 int i;
6979
6980 bnx2_netif_stop(bp);
6981 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6982 bnx2_free_skbs(bp);
6983
6984 if (bnx2_test_registers(bp) != 0) {
6985 buf[0] = 1;
6986 etest->flags |= ETH_TEST_FL_FAILED;
6987 }
6988 if (bnx2_test_memory(bp) != 0) {
6989 buf[1] = 1;
6990 etest->flags |= ETH_TEST_FL_FAILED;
6991 }
6992 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6993 etest->flags |= ETH_TEST_FL_FAILED;
6994
6995 if (!netif_running(bp->dev))
6996 bnx2_shutdown_chip(bp);
6997 else {
6998 bnx2_init_nic(bp, 1);
6999 bnx2_netif_start(bp);
7000 }
7001
7002 /* wait for link up */
7003 for (i = 0; i < 7; i++) {
7004 if (bp->link_up)
7005 break;
7006 msleep_interruptible(1000);
7007 }
7008 }
7009
7010 if (bnx2_test_nvram(bp) != 0) {
7011 buf[3] = 1;
7012 etest->flags |= ETH_TEST_FL_FAILED;
7013 }
7014 if (bnx2_test_intr(bp) != 0) {
7015 buf[4] = 1;
7016 etest->flags |= ETH_TEST_FL_FAILED;
7017 }
7018
7019 if (bnx2_test_link(bp) != 0) {
7020 buf[5] = 1;
7021 etest->flags |= ETH_TEST_FL_FAILED;
7022
7023 }
7024 if (!netif_running(bp->dev))
7025 bnx2_set_power_state(bp, PCI_D3hot);
7026 }
7027
7028 static void
7029 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7030 {
7031 switch (stringset) {
7032 case ETH_SS_STATS:
7033 memcpy(buf, bnx2_stats_str_arr,
7034 sizeof(bnx2_stats_str_arr));
7035 break;
7036 case ETH_SS_TEST:
7037 memcpy(buf, bnx2_tests_str_arr,
7038 sizeof(bnx2_tests_str_arr));
7039 break;
7040 }
7041 }
7042
7043 static void
7044 bnx2_get_ethtool_stats(struct net_device *dev,
7045 struct ethtool_stats *stats, u64 *buf)
7046 {
7047 struct bnx2 *bp = netdev_priv(dev);
7048 int i;
7049 u32 *hw_stats = (u32 *) bp->stats_blk;
7050 u8 *stats_len_arr = NULL;
7051
7052 if (hw_stats == NULL) {
7053 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7054 return;
7055 }
7056
7057 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7058 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7059 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7060 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7061 stats_len_arr = bnx2_5706_stats_len_arr;
7062 else
7063 stats_len_arr = bnx2_5708_stats_len_arr;
7064
7065 for (i = 0; i < BNX2_NUM_STATS; i++) {
7066 if (stats_len_arr[i] == 0) {
7067 /* skip this counter */
7068 buf[i] = 0;
7069 continue;
7070 }
7071 if (stats_len_arr[i] == 4) {
7072 /* 4-byte counter */
7073 buf[i] = (u64)
7074 *(hw_stats + bnx2_stats_offset_arr[i]);
7075 continue;
7076 }
7077 /* 8-byte counter */
7078 buf[i] = (((u64) *(hw_stats +
7079 bnx2_stats_offset_arr[i])) << 32) +
7080 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7081 }
7082 }
7083
7084 static int
7085 bnx2_phys_id(struct net_device *dev, u32 data)
7086 {
7087 struct bnx2 *bp = netdev_priv(dev);
7088 int i;
7089 u32 save;
7090
7091 bnx2_set_power_state(bp, PCI_D0);
7092
7093 if (data == 0)
7094 data = 2;
7095
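/* Each loop pass below toggles the LED for 500 ms, so 'data' is roughly
 * the total blink time in seconds (two passes per on/off cycle).
 */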
7096 save = REG_RD(bp, BNX2_MISC_CFG);
7097 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7098
7099 for (i = 0; i < (data * 2); i++) {
7100 if ((i % 2) == 0) {
7101 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7102 }
7103 else {
7104 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7105 BNX2_EMAC_LED_1000MB_OVERRIDE |
7106 BNX2_EMAC_LED_100MB_OVERRIDE |
7107 BNX2_EMAC_LED_10MB_OVERRIDE |
7108 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7109 BNX2_EMAC_LED_TRAFFIC);
7110 }
7111 msleep_interruptible(500);
7112 if (signal_pending(current))
7113 break;
7114 }
7115 REG_WR(bp, BNX2_EMAC_LED, 0);
7116 REG_WR(bp, BNX2_MISC_CFG, save);
7117
7118 if (!netif_running(dev))
7119 bnx2_set_power_state(bp, PCI_D3hot);
7120
7121 return 0;
7122 }
7123
7124 static int
7125 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7126 {
7127 struct bnx2 *bp = netdev_priv(dev);
7128
7129 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7130 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7131 else
7132 return (ethtool_op_set_tx_csum(dev, data));
7133 }
7134
7135 static const struct ethtool_ops bnx2_ethtool_ops = {
7136 .get_settings = bnx2_get_settings,
7137 .set_settings = bnx2_set_settings,
7138 .get_drvinfo = bnx2_get_drvinfo,
7139 .get_regs_len = bnx2_get_regs_len,
7140 .get_regs = bnx2_get_regs,
7141 .get_wol = bnx2_get_wol,
7142 .set_wol = bnx2_set_wol,
7143 .nway_reset = bnx2_nway_reset,
7144 .get_link = ethtool_op_get_link,
7145 .get_eeprom_len = bnx2_get_eeprom_len,
7146 .get_eeprom = bnx2_get_eeprom,
7147 .set_eeprom = bnx2_set_eeprom,
7148 .get_coalesce = bnx2_get_coalesce,
7149 .set_coalesce = bnx2_set_coalesce,
7150 .get_ringparam = bnx2_get_ringparam,
7151 .set_ringparam = bnx2_set_ringparam,
7152 .get_pauseparam = bnx2_get_pauseparam,
7153 .set_pauseparam = bnx2_set_pauseparam,
7154 .get_rx_csum = bnx2_get_rx_csum,
7155 .set_rx_csum = bnx2_set_rx_csum,
7156 .set_tx_csum = bnx2_set_tx_csum,
7157 .set_sg = ethtool_op_set_sg,
7158 .set_tso = bnx2_set_tso,
7159 .self_test = bnx2_self_test,
7160 .get_strings = bnx2_get_strings,
7161 .phys_id = bnx2_phys_id,
7162 .get_ethtool_stats = bnx2_get_ethtool_stats,
7163 .get_sset_count = bnx2_get_sset_count,
7164 };
7165
7166 /* Called with rtnl_lock */
7167 static int
7168 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7169 {
7170 struct mii_ioctl_data *data = if_mii(ifr);
7171 struct bnx2 *bp = netdev_priv(dev);
7172 int err;
7173
7174 switch(cmd) {
7175 case SIOCGMIIPHY:
7176 data->phy_id = bp->phy_addr;
7177
7178 /* fallthru */
7179 case SIOCGMIIREG: {
7180 u32 mii_regval;
7181
7182 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7183 return -EOPNOTSUPP;
7184
7185 if (!netif_running(dev))
7186 return -EAGAIN;
7187
7188 spin_lock_bh(&bp->phy_lock);
7189 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7190 spin_unlock_bh(&bp->phy_lock);
7191
7192 data->val_out = mii_regval;
7193
7194 return err;
7195 }
7196
7197 case SIOCSMIIREG:
7198 if (!capable(CAP_NET_ADMIN))
7199 return -EPERM;
7200
7201 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7202 return -EOPNOTSUPP;
7203
7204 if (!netif_running(dev))
7205 return -EAGAIN;
7206
7207 spin_lock_bh(&bp->phy_lock);
7208 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7209 spin_unlock_bh(&bp->phy_lock);
7210
7211 return err;
7212
7213 default:
7214 /* do nothing */
7215 break;
7216 }
7217 return -EOPNOTSUPP;
7218 }
7219
7220 /* Called with rtnl_lock */
7221 static int
7222 bnx2_change_mac_addr(struct net_device *dev, void *p)
7223 {
7224 struct sockaddr *addr = p;
7225 struct bnx2 *bp = netdev_priv(dev);
7226
7227 if (!is_valid_ether_addr(addr->sa_data))
7228 return -EINVAL;
7229
7230 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7231 if (netif_running(dev))
7232 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7233
7234 return 0;
7235 }
7236
7237 /* Called with rtnl_lock */
7238 static int
7239 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7240 {
7241 struct bnx2 *bp = netdev_priv(dev);
7242
7243 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7244 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7245 return -EINVAL;
7246
7247 dev->mtu = new_mtu;
7248 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7249 }
7250
7251 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7252 static void
7253 poll_bnx2(struct net_device *dev)
7254 {
7255 struct bnx2 *bp = netdev_priv(dev);
7256 int i;
7257
7258 for (i = 0; i < bp->irq_nvecs; i++) {
7259 disable_irq(bp->irq_tbl[i].vector);
7260 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7261 enable_irq(bp->irq_tbl[i].vector);
7262 }
7263 }
7264 #endif
7265
7266 static void __devinit
7267 bnx2_get_5709_media(struct bnx2 *bp)
7268 {
7269 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7270 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7271 u32 strap;
7272
7273 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7274 return;
7275 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7276 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7277 return;
7278 }
7279
7280 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7281 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7282 else
7283 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7284
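/* The strap value encodes the board's media bonding option; which
 * values select SerDes differs between PCI function 0 and function 1,
 * hence the two switch tables below.
 */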
7285 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7286 switch (strap) {
7287 case 0x4:
7288 case 0x5:
7289 case 0x6:
7290 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7291 return;
7292 }
7293 } else {
7294 switch (strap) {
7295 case 0x1:
7296 case 0x2:
7297 case 0x4:
7298 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7299 return;
7300 }
7301 }
7302 }
7303
7304 static void __devinit
7305 bnx2_get_pci_speed(struct bnx2 *bp)
7306 {
7307 u32 reg;
7308
7309 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7310 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7311 u32 clkreg;
7312
7313 bp->flags |= BNX2_FLAG_PCIX;
7314
7315 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7316
7317 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7318 switch (clkreg) {
7319 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7320 bp->bus_speed_mhz = 133;
7321 break;
7322
7323 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7324 bp->bus_speed_mhz = 100;
7325 break;
7326
7327 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7328 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7329 bp->bus_speed_mhz = 66;
7330 break;
7331
7332 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7333 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7334 bp->bus_speed_mhz = 50;
7335 break;
7336
7337 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7338 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7339 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7340 bp->bus_speed_mhz = 33;
7341 break;
7342 }
7343 }
7344 else {
7345 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7346 bp->bus_speed_mhz = 66;
7347 else
7348 bp->bus_speed_mhz = 33;
7349 }
7350
7351 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7352 bp->flags |= BNX2_FLAG_PCI_32BIT;
7353
7354 }
7355
7356 static int __devinit
7357 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7358 {
7359 struct bnx2 *bp;
7360 unsigned long mem_len;
7361 int rc, i, j;
7362 u32 reg;
7363 u64 dma_mask, persist_dma_mask;
7364
7365 SET_NETDEV_DEV(dev, &pdev->dev);
7366 bp = netdev_priv(dev);
7367
7368 bp->flags = 0;
7369 bp->phy_flags = 0;
7370
7371 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7372 rc = pci_enable_device(pdev);
7373 if (rc) {
7374 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7375 goto err_out;
7376 }
7377
7378 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7379 dev_err(&pdev->dev,
7380 "Cannot find PCI device base address, aborting.\n");
7381 rc = -ENODEV;
7382 goto err_out_disable;
7383 }
7384
7385 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7386 if (rc) {
7387 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7388 goto err_out_disable;
7389 }
7390
7391 pci_set_master(pdev);
7392 pci_save_state(pdev);
7393
7394 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7395 if (bp->pm_cap == 0) {
7396 dev_err(&pdev->dev,
7397 "Cannot find power management capability, aborting.\n");
7398 rc = -EIO;
7399 goto err_out_release;
7400 }
7401
7402 bp->dev = dev;
7403 bp->pdev = pdev;
7404
7405 spin_lock_init(&bp->phy_lock);
7406 spin_lock_init(&bp->indirect_lock);
7407 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7408
7409 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7410 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7411 dev->mem_end = dev->mem_start + mem_len;
7412 dev->irq = pdev->irq;
7413
7414 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7415
7416 if (!bp->regview) {
7417 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7418 rc = -ENOMEM;
7419 goto err_out_release;
7420 }
7421
7422 /* Configure byte swap and enable write to the reg_window registers.
7423 * Rely on the CPU to do target byte swapping on big endian systems;
7424 * the chip's target access swapping will not swap all accesses.
7425 */
7426 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7427 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7428 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7429
7430 bnx2_set_power_state(bp, PCI_D0);
7431
7432 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7433
7434 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7435 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7436 dev_err(&pdev->dev,
7437 "Cannot find PCIE capability, aborting.\n");
7438 rc = -EIO;
7439 goto err_out_unmap;
7440 }
7441 bp->flags |= BNX2_FLAG_PCIE;
7442 if (CHIP_REV(bp) == CHIP_REV_Ax)
7443 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7444 } else {
7445 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7446 if (bp->pcix_cap == 0) {
7447 dev_err(&pdev->dev,
7448 "Cannot find PCIX capability, aborting.\n");
7449 rc = -EIO;
7450 goto err_out_unmap;
7451 }
7452 }
7453
7454 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7455 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7456 bp->flags |= BNX2_FLAG_MSIX_CAP;
7457 }
7458
7459 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7460 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7461 bp->flags |= BNX2_FLAG_MSI_CAP;
7462 }
7463
7464 /* 5708 cannot support DMA addresses > 40-bit. */
7465 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7466 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7467 else
7468 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7469
7470 /* Configure DMA attributes. */
7471 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7472 dev->features |= NETIF_F_HIGHDMA;
7473 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7474 if (rc) {
7475 dev_err(&pdev->dev,
7476 "pci_set_consistent_dma_mask failed, aborting.\n");
7477 goto err_out_unmap;
7478 }
7479 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7480 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7481 goto err_out_unmap;
7482 }
7483
7484 if (!(bp->flags & BNX2_FLAG_PCIE))
7485 bnx2_get_pci_speed(bp);
7486
7487 /* 5706A0 may falsely detect SERR and PERR. */
7488 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7489 reg = REG_RD(bp, PCI_COMMAND);
7490 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7491 REG_WR(bp, PCI_COMMAND, reg);
7492 }
7493 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7494 !(bp->flags & BNX2_FLAG_PCIX)) {
7495
7496 dev_err(&pdev->dev,
7497 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7498 goto err_out_unmap;
7499 }
7500
7501 bnx2_init_nvram(bp);
7502
7503 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7504
7505 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7506 BNX2_SHM_HDR_SIGNATURE_SIG) {
7507 u32 off = PCI_FUNC(pdev->devfn) << 2;
7508
7509 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7510 } else
7511 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7512
7513 /* Get the permanent MAC address. First we need to make sure the
7514 * firmware is actually running.
7515 */
7516 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7517
7518 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7519 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7520 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7521 rc = -ENODEV;
7522 goto err_out_unmap;
7523 }
7524
7525 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
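/* BC_REV packs the bootcode version as three bytes, one per field; the
 * loop converts each byte to decimal with leading zeros suppressed,
 * building a dotted string such as "1.9.6" (a hypothetical value) in
 * bp->fw_version.
 */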
7526 for (i = 0, j = 0; i < 3; i++) {
7527 u8 num, k, skip0;
7528
7529 num = (u8) (reg >> (24 - (i * 8)));
7530 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7531 if (num >= k || !skip0 || k == 1) {
7532 bp->fw_version[j++] = (num / k) + '0';
7533 skip0 = 0;
7534 }
7535 }
7536 if (i != 2)
7537 bp->fw_version[j++] = '.';
7538 }
7539 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7540 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7541 bp->wol = 1;
7542
7543 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7544 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7545
7546 for (i = 0; i < 30; i++) {
7547 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7548 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7549 break;
7550 msleep(10);
7551 }
7552 }
7553 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7554 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7555 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7556 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7557 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7558
7559 bp->fw_version[j++] = ' ';
7560 for (i = 0; i < 3; i++) {
7561 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7562 reg = swab32(reg);
7563 memcpy(&bp->fw_version[j], &reg, 4);
7564 j += 4;
7565 }
7566 }
7567
7568 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7569 bp->mac_addr[0] = (u8) (reg >> 8);
7570 bp->mac_addr[1] = (u8) reg;
7571
7572 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7573 bp->mac_addr[2] = (u8) (reg >> 24);
7574 bp->mac_addr[3] = (u8) (reg >> 16);
7575 bp->mac_addr[4] = (u8) (reg >> 8);
7576 bp->mac_addr[5] = (u8) reg;
7577
7578 bp->tx_ring_size = MAX_TX_DESC_CNT;
7579 bnx2_set_rx_ring_size(bp, 255);
7580
7581 bp->rx_csum = 1;
7582
7583 bp->tx_quick_cons_trip_int = 20;
7584 bp->tx_quick_cons_trip = 20;
7585 bp->tx_ticks_int = 80;
7586 bp->tx_ticks = 80;
7587
7588 bp->rx_quick_cons_trip_int = 6;
7589 bp->rx_quick_cons_trip = 6;
7590 bp->rx_ticks_int = 18;
7591 bp->rx_ticks = 18;
7592
7593 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7594
7595 bp->current_interval = BNX2_TIMER_INTERVAL;
7596
7597 bp->phy_addr = 1;
7598
7599 /* Disable WOL support if we are running on a SERDES chip. */
7600 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7601 bnx2_get_5709_media(bp);
7602 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7603 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7604
7605 bp->phy_port = PORT_TP;
7606 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7607 bp->phy_port = PORT_FIBRE;
7608 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7609 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7610 bp->flags |= BNX2_FLAG_NO_WOL;
7611 bp->wol = 0;
7612 }
7613 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7614 /* Don't do parallel detect on this board because of
7615 * some board problems. The link will not go down
7616 * if we do parallel detect.
7617 */
7618 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7619 pdev->subsystem_device == 0x310c)
7620 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7621 } else {
7622 bp->phy_addr = 2;
7623 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7624 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7625 }
7626 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7627 CHIP_NUM(bp) == CHIP_NUM_5708)
7628 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7629 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7630 (CHIP_REV(bp) == CHIP_REV_Ax ||
7631 CHIP_REV(bp) == CHIP_REV_Bx))
7632 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7633
7634 bnx2_init_fw_cap(bp);
7635
7636 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7637 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7638 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7639 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7640 bp->flags |= BNX2_FLAG_NO_WOL;
7641 bp->wol = 0;
7642 }
7643
7644 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7645 bp->tx_quick_cons_trip_int =
7646 bp->tx_quick_cons_trip;
7647 bp->tx_ticks_int = bp->tx_ticks;
7648 bp->rx_quick_cons_trip_int =
7649 bp->rx_quick_cons_trip;
7650 bp->rx_ticks_int = bp->rx_ticks;
7651 bp->comp_prod_trip_int = bp->comp_prod_trip;
7652 bp->com_ticks_int = bp->com_ticks;
7653 bp->cmd_ticks_int = bp->cmd_ticks;
7654 }
7655
7656 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7657 *
7658 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
7659 * with byte enables disabled on the unused 32-bit word. This is legal
7660 * but causes problems on the AMD 8132 which will eventually stop
7661 * responding after a while.
7662 *
7663 * AMD believes this incompatibility is unique to the 5706, and
7664 * prefers to locally disable MSI rather than globally disabling it.
7665 */
7666 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7667 struct pci_dev *amd_8132 = NULL;
7668
7669 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7670 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7671 amd_8132))) {
7672
7673 if (amd_8132->revision >= 0x10 &&
7674 amd_8132->revision <= 0x13) {
7675 disable_msi = 1;
7676 pci_dev_put(amd_8132);
7677 break;
7678 }
7679 }
7680 }
7681
7682 bnx2_set_default_link(bp);
7683 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7684
7685 init_timer(&bp->timer);
7686 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7687 bp->timer.data = (unsigned long) bp;
7688 bp->timer.function = bnx2_timer;
7689
7690 return 0;
7691
7692 err_out_unmap:
7693 if (bp->regview) {
7694 iounmap(bp->regview);
7695 bp->regview = NULL;
7696 }
7697
7698 err_out_release:
7699 pci_release_regions(pdev);
7700
7701 err_out_disable:
7702 pci_disable_device(pdev);
7703 pci_set_drvdata(pdev, NULL);
7704
7705 err_out:
7706 return rc;
7707 }
7708
7709 static char * __devinit
7710 bnx2_bus_string(struct bnx2 *bp, char *str)
7711 {
7712 char *s = str;
7713
7714 if (bp->flags & BNX2_FLAG_PCIE) {
7715 s += sprintf(s, "PCI Express");
7716 } else {
7717 s += sprintf(s, "PCI");
7718 if (bp->flags & BNX2_FLAG_PCIX)
7719 s += sprintf(s, "-X");
7720 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7721 s += sprintf(s, " 32-bit");
7722 else
7723 s += sprintf(s, " 64-bit");
7724 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7725 }
7726 return str;
7727 }
7728
7729 static void __devinit
7730 bnx2_init_napi(struct bnx2 *bp)
7731 {
7732 int i;
7733
7734 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7735 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7736 int (*poll)(struct napi_struct *, int);
7737
7738 if (i == 0)
7739 poll = bnx2_poll;
7740 else
7741 poll = bnx2_poll_msix;
7742
7743 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7744 bnapi->bp = bp;
7745 }
7746 }
7747
7748 static const struct net_device_ops bnx2_netdev_ops = {
7749 .ndo_open = bnx2_open,
7750 .ndo_start_xmit = bnx2_start_xmit,
7751 .ndo_stop = bnx2_close,
7752 .ndo_get_stats = bnx2_get_stats,
7753 .ndo_set_rx_mode = bnx2_set_rx_mode,
7754 .ndo_do_ioctl = bnx2_ioctl,
7755 .ndo_validate_addr = eth_validate_addr,
7756 .ndo_set_mac_address = bnx2_change_mac_addr,
7757 .ndo_change_mtu = bnx2_change_mtu,
7758 .ndo_tx_timeout = bnx2_tx_timeout,
7759 #ifdef BCM_VLAN
7760 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
7761 #endif
7762 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7763 .ndo_poll_controller = poll_bnx2,
7764 #endif
7765 };
7766
7767 static int __devinit
7768 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7769 {
7770 static int version_printed = 0;
7771 struct net_device *dev = NULL;
7772 struct bnx2 *bp;
7773 int rc;
7774 char str[40];
7775
7776 if (version_printed++ == 0)
7777 printk(KERN_INFO "%s", version);
7778
7779 /* dev zeroed in alloc_etherdev_mq() */
7780 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7781
7782 if (!dev)
7783 return -ENOMEM;
7784
7785 rc = bnx2_init_board(pdev, dev);
7786 if (rc < 0) {
7787 free_netdev(dev);
7788 return rc;
7789 }
7790
7791 dev->netdev_ops = &bnx2_netdev_ops;
7792 dev->watchdog_timeo = TX_TIMEOUT;
7793 dev->ethtool_ops = &bnx2_ethtool_ops;
7794
7795 bp = netdev_priv(dev);
7796 bnx2_init_napi(bp);
7797
7798 pci_set_drvdata(pdev, dev);
7799
7800 memcpy(dev->dev_addr, bp->mac_addr, 6);
7801 memcpy(dev->perm_addr, bp->mac_addr, 6);
7802
7803 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7804 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7805 dev->features |= NETIF_F_IPV6_CSUM;
7806
7807 #ifdef BCM_VLAN
7808 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7809 #endif
7810 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7811 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7812 dev->features |= NETIF_F_TSO6;
7813
7814 if ((rc = register_netdev(dev))) {
7815 dev_err(&pdev->dev, "Cannot register net device\n");
7816 if (bp->regview)
7817 iounmap(bp->regview);
7818 pci_release_regions(pdev);
7819 pci_disable_device(pdev);
7820 pci_set_drvdata(pdev, NULL);
7821 free_netdev(dev);
7822 return rc;
7823 }
7824
7825 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7826 "IRQ %d, node addr %pM\n",
7827 dev->name,
7828 board_info[ent->driver_data].name,
7829 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7830 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7831 bnx2_bus_string(bp, str),
7832 dev->base_addr,
7833 bp->pdev->irq, dev->dev_addr);
7834
7835 return 0;
7836 }
7837
7838 static void __devexit
7839 bnx2_remove_one(struct pci_dev *pdev)
7840 {
7841 struct net_device *dev = pci_get_drvdata(pdev);
7842 struct bnx2 *bp = netdev_priv(dev);
7843
7844 flush_scheduled_work();
7845
7846 unregister_netdev(dev);
7847
7848 if (bp->regview)
7849 iounmap(bp->regview);
7850
7851 free_netdev(dev);
7852 pci_release_regions(pdev);
7853 pci_disable_device(pdev);
7854 pci_set_drvdata(pdev, NULL);
7855 }
7856
7857 static int
7858 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7859 {
7860 struct net_device *dev = pci_get_drvdata(pdev);
7861 struct bnx2 *bp = netdev_priv(dev);
7862
7863 /* PCI register 4 needs to be saved whether netif_running() or not.
7864 * MSI address and data need to be saved if using MSI and
7865 * netif_running().
7866 */
7867 pci_save_state(pdev);
7868 if (!netif_running(dev))
7869 return 0;
7870
7871 flush_scheduled_work();
7872 bnx2_netif_stop(bp);
7873 netif_device_detach(dev);
7874 del_timer_sync(&bp->timer);
7875 bnx2_shutdown_chip(bp);
7876 bnx2_free_skbs(bp);
7877 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7878 return 0;
7879 }
7880
7881 static int
7882 bnx2_resume(struct pci_dev *pdev)
7883 {
7884 struct net_device *dev = pci_get_drvdata(pdev);
7885 struct bnx2 *bp = netdev_priv(dev);
7886
7887 pci_restore_state(pdev);
7888 if (!netif_running(dev))
7889 return 0;
7890
7891 bnx2_set_power_state(bp, PCI_D0);
7892 netif_device_attach(dev);
7893 bnx2_init_nic(bp, 1);
7894 bnx2_netif_start(bp);
7895 return 0;
7896 }
7897
7898 /**
7899 * bnx2_io_error_detected - called when PCI error is detected
7900 * @pdev: Pointer to PCI device
7901 * @state: The current pci connection state
7902 *
7903 * This function is called after a PCI bus error affecting
7904 * this device has been detected.
7905 */
7906 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7907 pci_channel_state_t state)
7908 {
7909 struct net_device *dev = pci_get_drvdata(pdev);
7910 struct bnx2 *bp = netdev_priv(dev);
7911
7912 rtnl_lock();
7913 netif_device_detach(dev);
7914
7915 if (netif_running(dev)) {
7916 bnx2_netif_stop(bp);
7917 del_timer_sync(&bp->timer);
7918 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7919 }
7920
7921 pci_disable_device(pdev);
7922 rtnl_unlock();
7923
7924 /* Request a slot reset. */
7925 return PCI_ERS_RESULT_NEED_RESET;
7926 }
7927
7928 /**
7929 * bnx2_io_slot_reset - called after the pci bus has been reset.
7930 * @pdev: Pointer to PCI device
7931 *
7932 * Restart the card from scratch, as if from a cold-boot.
7933 */
7934 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7935 {
7936 struct net_device *dev = pci_get_drvdata(pdev);
7937 struct bnx2 *bp = netdev_priv(dev);
7938
7939 rtnl_lock();
7940 if (pci_enable_device(pdev)) {
7941 dev_err(&pdev->dev,
7942 "Cannot re-enable PCI device after reset.\n");
7943 rtnl_unlock();
7944 return PCI_ERS_RESULT_DISCONNECT;
7945 }
7946 pci_set_master(pdev);
7947 pci_restore_state(pdev);
7948
7949 if (netif_running(dev)) {
7950 bnx2_set_power_state(bp, PCI_D0);
7951 bnx2_init_nic(bp, 1);
7952 }
7953
7954 rtnl_unlock();
7955 return PCI_ERS_RESULT_RECOVERED;
7956 }
7957
7958 /**
7959 * bnx2_io_resume - called when traffic can start flowing again.
7960 * @pdev: Pointer to PCI device
7961 *
7962 * This callback is called when the error recovery driver tells us that
7963 * it's OK to resume normal operation.
7964 */
7965 static void bnx2_io_resume(struct pci_dev *pdev)
7966 {
7967 struct net_device *dev = pci_get_drvdata(pdev);
7968 struct bnx2 *bp = netdev_priv(dev);
7969
7970 rtnl_lock();
7971 if (netif_running(dev))
7972 bnx2_netif_start(bp);
7973
7974 netif_device_attach(dev);
7975 rtnl_unlock();
7976 }
7977
7978 static struct pci_error_handlers bnx2_err_handler = {
7979 .error_detected = bnx2_io_error_detected,
7980 .slot_reset = bnx2_io_slot_reset,
7981 .resume = bnx2_io_resume,
7982 };
7983
7984 static struct pci_driver bnx2_pci_driver = {
7985 .name = DRV_MODULE_NAME,
7986 .id_table = bnx2_pci_tbl,
7987 .probe = bnx2_init_one,
7988 .remove = __devexit_p(bnx2_remove_one),
7989 .suspend = bnx2_suspend,
7990 .resume = bnx2_resume,
7991 .err_handler = &bnx2_err_handler,
7992 };
7993
7994 static int __init bnx2_init(void)
7995 {
7996 return pci_register_driver(&bnx2_pci_driver);
7997 }
7998
7999 static void __exit bnx2_cleanup(void)
8000 {
8001 pci_unregister_driver(&bnx2_pci_driver);
8002 }
8003
8004 module_init(bnx2_init);
8005 module_exit(bnx2_cleanup);
8006
8007
8008