/*
 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Based on earlier sk98lin, e100 and
 * FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>

#include "skge.h"

#define DRV_NAME		"skge"
#define DRV_VERSION		"0.6"
#define PFX			DRV_NAME " "

#define DEFAULT_TX_RING_SIZE	128
#define DEFAULT_RX_RING_SIZE	512
#define MAX_TX_RING_SIZE	1024
#define MAX_RX_RING_SIZE	4096
#define PHY_RETRIES		1000
#define ETH_JUMBO_MTU		9000
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define BLINK_HZ		(HZ/4)
#define LINK_POLL_HZ		(HZ/10)

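/*
 * BLINK_HZ and LINK_POLL_HZ are jiffy counts: a quarter and a tenth of
 * a second respectively, independent of the configured HZ (e.g. with
 * HZ=250, BLINK_HZ is 62 jiffies, roughly 250 ms).
 */
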
MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
	= NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	  | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct pci_device_id skge_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);

static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_tx_clean(struct skge_port *skge);
static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static void yukon_get_stats(struct skge_port *skge, u64 *data);
static void yukon_init(struct skge_hw *hw, int port);
static void yukon_reset(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_reset(struct skge_hw *hw, int port);

static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };

/* Don't need to look at whole 16K.
 * last interesting register is descriptor poll timer.
 */
#define SKGE_REGS_LEN	(29*128)

static int skge_get_regs_len(struct net_device *dev)
{
	return SKGE_REGS_LEN;
}

/*
 * Returns copy of control register region
 * I/O region is divided into banks and certain regions are unreadable
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct skge_port *skge = netdev_priv(dev);
	unsigned long offs;
	const void __iomem *io = skge->hw->regs;
	static const unsigned long bankmap
		= (1<<0) | (1<<2) | (1<<8) | (1<<9)
		  | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
		  | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
		  | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);

	regs->version = 1;
	for (offs = 0; offs < regs->len; offs += 128) {
		u32 len = min_t(u32, 128, regs->len - offs);

		if (bankmap & (1<<(offs/128)))
			memcpy_fromio(p + offs, io + offs, len);
		else
			memset(p + offs, 0, len);
	}
}

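/*
 * Bank arithmetic sketch: each 128-byte block is one bank, so an
 * offset of 0x300 is bank 0x300/128 = 6.  Bank 6 is not set in
 * bankmap above, so those 128 bytes come back zeroed rather than
 * being read from hardware.
 */
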
/* Wake On LAN only supported on Yukon chips with rev 1 or above */
static int wol_supported(const struct skge_hw *hw)
{
	return !((hw->chip_id == CHIP_ID_GENESIS ||
		  (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
}

static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);

	wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
	wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
}

static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
		return -EOPNOTSUPP;

	skge->wol = wol->wolopts == WAKE_MAGIC;

	if (skge->wol) {
		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);

		skge_write16(hw, WOL_CTRL_STAT,
			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
	} else
		skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);

	return 0;
}

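/*
 * Usage sketch: magic-packet wake can be toggled from userspace with
 * "ethtool -s ethX wol g" (enable) or "ethtool -s ethX wol d"
 * (disable); any other WoL mode is rejected with -EOPNOTSUPP above.
 */
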
static int skge_get_settings(struct net_device *dev,
			     struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	ecmd->transceiver = XCVR_INTERNAL;

	if (iscopper(hw)) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			ecmd->supported = SUPPORTED_1000baseT_Full
				| SUPPORTED_1000baseT_Half
				| SUPPORTED_Autoneg | SUPPORTED_TP;
		else {
			ecmd->supported = SUPPORTED_10baseT_Half
				| SUPPORTED_10baseT_Full
				| SUPPORTED_100baseT_Half
				| SUPPORTED_100baseT_Full
				| SUPPORTED_1000baseT_Half
				| SUPPORTED_1000baseT_Full
				| SUPPORTED_Autoneg | SUPPORTED_TP;

			if (hw->chip_id == CHIP_ID_YUKON)
				ecmd->supported &= ~SUPPORTED_1000baseT_Half;

			else if (hw->chip_id == CHIP_ID_YUKON_FE)
				ecmd->supported &= ~(SUPPORTED_1000baseT_Half
						     | SUPPORTED_1000baseT_Full);
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy_addr;
	} else {
		ecmd->supported = SUPPORTED_1000baseT_Full
			| SUPPORTED_FIBRE
			| SUPPORTED_Autoneg;

		ecmd->port = PORT_FIBRE;
	}

	ecmd->advertising = skge->advertising;
	ecmd->autoneg = skge->autoneg;
	ecmd->speed = skge->speed;
	ecmd->duplex = skge->duplex;
	return 0;
}

static u32 skge_modes(const struct skge_hw *hw)
{
	u32 modes = ADVERTISED_Autoneg
		| ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half
		| ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half
		| ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half;

	if (iscopper(hw)) {
		modes |= ADVERTISED_TP;
		switch (hw->chip_id) {
		case CHIP_ID_GENESIS:
			modes &= ~(ADVERTISED_100baseT_Full
				   | ADVERTISED_100baseT_Half
				   | ADVERTISED_10baseT_Full
				   | ADVERTISED_10baseT_Half);
			break;

		case CHIP_ID_YUKON:
			modes &= ~ADVERTISED_1000baseT_Half;
			break;

		case CHIP_ID_YUKON_FE:
			modes &= ~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
			break;
		}
	} else {
		modes |= ADVERTISED_FIBRE;
		modes &= ~ADVERTISED_1000baseT_Half;
	}
	return modes;
}

static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* reject any advertised mode the hardware can't do */
		if (ecmd->advertising & ~skge_modes(hw))
			return -EINVAL;
	} else {
		switch (ecmd->speed) {
		case SPEED_1000:
			if (hw->chip_id == CHIP_ID_YUKON_FE)
				return -EINVAL;
			break;
		case SPEED_100:
		case SPEED_10:
			/* only copper Yukon can be forced to 10/100 */
			if (!iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	skge->autoneg = ecmd->autoneg;
	skge->speed = ecmd->speed;
	skge->duplex = ecmd->duplex;
	skge->advertising = ecmd->advertising;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}
	return 0;
}

static void skge_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct skge_port *skge = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(skge->hw->pdev));
}

static const struct skge_stat {
	char	 name[ETH_GSTRING_LEN];
	u16	 xmac_offset;
	u16	 gma_offset;
} skge_stats[] = {
	{ "tx_bytes",		XM_TXO_OK_HI,	GM_TXO_OK_HI },
	{ "rx_bytes",		XM_RXO_OK_HI,	GM_RXO_OK_HI },

	{ "tx_broadcast",	XM_TXF_BC_OK,	GM_TXF_BC_OK },
	{ "rx_broadcast",	XM_RXF_BC_OK,	GM_RXF_BC_OK },
	{ "tx_multicast",	XM_TXF_MC_OK,	GM_TXF_MC_OK },
	{ "rx_multicast",	XM_RXF_MC_OK,	GM_RXF_MC_OK },
	{ "tx_unicast",		XM_TXF_UC_OK,	GM_TXF_UC_OK },
	{ "rx_unicast",		XM_RXF_UC_OK,	GM_RXF_UC_OK },
	{ "tx_mac_pause",	XM_TXF_MPAUSE,	GM_TXF_MPAUSE },
	{ "rx_mac_pause",	XM_RXF_MPAUSE,	GM_RXF_MPAUSE },

	{ "collisions",		XM_TXF_SNG_COL,	GM_TXF_SNG_COL },
	{ "multi_collisions",	XM_TXF_MUL_COL,	GM_TXF_MUL_COL },
	{ "aborted",		XM_TXF_ABO_COL,	GM_TXF_ABO_COL },
	{ "late_collision",	XM_TXF_LAT_COL,	GM_TXF_LAT_COL },
	{ "fifo_underrun",	XM_TXE_FIFO_UR,	GM_TXE_FIFO_UR },
	{ "fifo_overflow",	XM_RXE_FIFO_OV,	GM_RXE_FIFO_OV },

	{ "rx_toolong",		XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_jabber",		XM_RXF_JAB_PKT,	GM_RXF_JAB_PKT },
	{ "rx_runt",		XM_RXE_RUNT,	GM_RXE_FRAG },
	{ "rx_too_long",	XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_fcs_error",	XM_RXF_FCS_ERR,	GM_RXF_FCS_ERR },
};

static int skge_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(skge_stats);
}

static void skge_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);
}

/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	u64 data[ARRAY_SIZE(skge_stats)];

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);

	skge->net_stats.tx_bytes = data[0];
	skge->net_stats.rx_bytes = data[1];
	skge->net_stats.tx_packets = data[2] + data[4] + data[6];
	skge->net_stats.rx_packets = data[3] + data[5] + data[7];
	skge->net_stats.multicast = data[5] + data[7];
	skge->net_stats.collisions = data[10];
	skge->net_stats.tx_aborted_errors = data[12];

	return &skge->net_stats;
}

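/*
 * The data[] indices above follow the order of skge_stats[]:
 * 0 tx_bytes, 1 rx_bytes, 2/3 broadcast, 4/5 multicast, 6/7 unicast
 * (tx/rx pairs), 10 collisions, 12 aborted.  Adding or reordering
 * entries in skge_stats[] therefore requires updating these indices.
 */
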
static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       skge_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static void skge_get_ring_param(struct net_device *dev,
				struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	p->rx_max_pending = MAX_RX_RING_SIZE;
	p->tx_max_pending = MAX_TX_RING_SIZE;
	p->rx_mini_max_pending = 0;
	p->rx_jumbo_max_pending = 0;

	p->rx_pending = skge->rx_ring.count;
	p->tx_pending = skge->tx_ring.count;
	p->rx_mini_pending = 0;
	p->rx_jumbo_pending = 0;
}

static int skge_set_ring_param(struct net_device *dev,
			       struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
		return -EINVAL;

	skge->rx_ring.count = p->rx_pending;
	skge->tx_ring.count = p->tx_pending;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}

	return 0;
}

static u32 skge_get_msglevel(struct net_device *netdev)
{
	struct skge_port *skge = netdev_priv(netdev);
	return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct skge_port *skge = netdev_priv(netdev);
	skge->msg_enable = value;
}

static int skge_nway_reset(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
		return -EINVAL;

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		genesis_reset(hw, port);
		genesis_mac_init(hw, port);
	} else {
		yukon_reset(hw, port);
		yukon_init(hw, port);
	}
	spin_unlock_bh(&hw->phy_lock);
	return 0;
}

static int skge_set_sg(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;
	return ethtool_op_set_sg(dev, data);
}

static int skge_set_tx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	return ethtool_op_set_tx_csum(dev, data);
}

static u32 skge_get_rx_csum(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	return skge->rx_csum;
}

/* Only Yukon supports checksum offload. */
static int skge_set_rx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	skge->rx_csum = data;
	return 0;
}

static void skge_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);

	ecmd->autoneg = skge->autoneg;
}

static int skge_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	skge->autoneg = ecmd->autoneg;
	if (ecmd->rx_pause && ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_SYMMETRIC;
	else if (ecmd->rx_pause && !ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_REM_SEND;
	else if (!ecmd->rx_pause && ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_LOC_SEND;
	else
		skge->flow_control = FLOW_MODE_NONE;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}
	return 0;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return 53125; /* or:  53.125 MHz */
	else if (hw->chip_id == CHIP_ID_YUKON_EC)
		return 125000; /* or: 125.000 MHz */
	else
		return 78125; /* or:  78.125 MHz */
}

/* Chip hz to microseconds */
static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
{
	return (ticks * 1000) / hwkhz(hw);
}

/* Microseconds to chip hz */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
	return hwkhz(hw) * usec / 1000;
}

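/*
 * Worked example: on a 78.125 MHz Yukon, hwkhz() is 78125, so a 25 us
 * coalescing delay becomes 78125 * 25 / 1000 = 1953 clock ticks, and
 * converting back gives 1953 * 1000 / 78125 = 24 us (integer division
 * rounds down, so round trips can lose a microsecond).
 */
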
static int skge_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	ecmd->rx_coalesce_usecs = 0;
	ecmd->tx_coalesce_usecs = 0;

	if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
		u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
		u32 msk = skge_read32(hw, B2_IRQM_MSK);

		if (msk & rxirqmask[port])
			ecmd->rx_coalesce_usecs = delay;
		if (msk & txirqmask[port])
			ecmd->tx_coalesce_usecs = delay;
	}

	return 0;
}

/* Note: interrupt timer is per board, but can turn on/off per port */
static int skge_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 msk = skge_read32(hw, B2_IRQM_MSK);
	u32 delay = 25;

	if (ecmd->rx_coalesce_usecs == 0)
		msk &= ~rxirqmask[port];
	else if (ecmd->rx_coalesce_usecs < 25 ||
		 ecmd->rx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= rxirqmask[port];
		delay = ecmd->rx_coalesce_usecs;
	}

	if (ecmd->tx_coalesce_usecs == 0)
		msk &= ~txirqmask[port];
	else if (ecmd->tx_coalesce_usecs < 25 ||
		 ecmd->tx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= txirqmask[port];
		delay = min(delay, ecmd->tx_coalesce_usecs);
	}

	skge_write32(hw, B2_IRQM_MSK, msk);
	if (msk == 0)
		skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
	else {
		skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
		skge_write32(hw, B2_IRQM_CTRL, TIM_START);
	}
	return 0;
}

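/*
 * Usage sketch: "ethtool -C ethX rx-usecs 50 tx-usecs 50" programs a
 * shared 50 us interrupt moderation timer for this port; values must
 * be 0 (off) or within 25..33333 us, and because the timer is per
 * board the smaller of the two requested delays wins.
 */
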
static void skge_led_on(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
		skge_write8(hw, B0_LED, LED_STAT_ON);

		skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
		skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

		switch (hw->phy_type) {
		case SK_PHY_BCOM:
			xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
				     PHY_B_PEC_LED_ON);
			break;
		default:
			skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
			skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
		}
	} else {
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     PHY_M_LED_MO_DUP(MO_LED_ON)  |
			     PHY_M_LED_MO_10(MO_LED_ON)   |
			     PHY_M_LED_MO_100(MO_LED_ON)  |
			     PHY_M_LED_MO_1000(MO_LED_ON) |
			     PHY_M_LED_MO_RX(MO_LED_ON));
	}
}

static void skge_led_off(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
		skge_write8(hw, B0_LED, LED_STAT_OFF);

		skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);

		switch (hw->phy_type) {
		case SK_PHY_BCOM:
			xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
				     PHY_B_PEC_LED_OFF);
			break;
		default:
			skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
		}
	} else {
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
			     PHY_M_LED_MO_10(MO_LED_OFF)   |
			     PHY_M_LED_MO_100(MO_LED_OFF)  |
			     PHY_M_LED_MO_1000(MO_LED_OFF) |
			     PHY_M_LED_MO_RX(MO_LED_OFF));
	}
}

static void skge_blink_timer(unsigned long data)
{
	struct skge_port *skge = (struct skge_port *) data;
	struct skge_hw *hw = skge->hw;
	unsigned long flags;

	spin_lock_irqsave(&hw->phy_lock, flags);
	if (skge->blink_on)
		skge_led_on(hw, skge->port);
	else
		skge_led_off(hw, skge->port);
	spin_unlock_irqrestore(&hw->phy_lock, flags);

	skge->blink_on = !skge->blink_on;
	mod_timer(&skge->led_blink, jiffies + BLINK_HZ);
}

/* blink LEDs for finding board */
static int skge_phys_id(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	/* start blinking */
	skge->blink_on = 1;
	mod_timer(&skge->led_blink, jiffies+1);

	msleep_interruptible(data * 1000);
	del_timer_sync(&skge->led_blink);

	skge_led_off(skge->hw, skge->port);

	return 0;
}

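/*
 * Usage sketch: "ethtool -p ethX 5" calls this handler and blinks the
 * port LEDs for 5 seconds; with no duration argument it blinks until
 * interrupted, bounded by MAX_SCHEDULE_TIMEOUT above.
 */
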
static struct ethtool_ops skge_ethtool_ops = {
	.get_settings	= skge_get_settings,
	.set_settings	= skge_set_settings,
	.get_drvinfo	= skge_get_drvinfo,
	.get_regs_len	= skge_get_regs_len,
	.get_regs	= skge_get_regs,
	.get_wol	= skge_get_wol,
	.set_wol	= skge_set_wol,
	.get_msglevel	= skge_get_msglevel,
	.set_msglevel	= skge_set_msglevel,
	.nway_reset	= skge_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_ringparam	= skge_get_ring_param,
	.set_ringparam	= skge_set_ring_param,
	.get_pauseparam	= skge_get_pauseparam,
	.set_pauseparam	= skge_set_pauseparam,
	.get_coalesce	= skge_get_coalesce,
	.set_coalesce	= skge_set_coalesce,
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= skge_set_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= skge_set_tx_csum,
	.get_rx_csum	= skge_get_rx_csum,
	.set_rx_csum	= skge_set_rx_csum,
	.get_strings	= skge_get_strings,
	.phys_id	= skge_phys_id,
	.get_stats_count	= skge_get_stats_count,
	.get_ethtool_stats	= skge_get_ethtool_stats,
};

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
{
	struct skge_tx_desc *d;
	struct skge_element *e;
	int i;

	ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
	if (!ring->start)
		return -ENOMEM;

	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
		e->desc = d;
		if (i == ring->count - 1) {
			e->next = ring->start;
			d->next_offset = base;
		} else {
			e->next = e + 1;
			d->next_offset = base + (i+1) * sizeof(*d);
		}
	}
	ring->to_use = ring->to_clean = ring->start;

	return 0;
}

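/*
 * Chaining sketch: with count = 4 and a DMA base B, the descriptors'
 * next_offset values are B+1*sizeof(d), B+2*sizeof(d), B+3*sizeof(d)
 * and finally B again, so both the software elements and the hardware
 * descriptors form the same circular list.
 */
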
/* Setup buffer for receiving */
static inline int skge_rx_alloc(struct skge_port *skge,
				struct skge_element *e)
{
	unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */
	struct skge_rx_desc *rd = e->desc;
	struct sk_buff *skb;
	u64 map;

	skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);
	if (unlikely(!skb)) {
		printk(KERN_DEBUG PFX "%s: out of memory for receive\n",
		       skge->netdev->name);
		return -ENOMEM;
	}

	skb->dev = skge->netdev;
	skb_reserve(skb, NET_IP_ALIGN);

	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			     PCI_DMA_FROMDEVICE);

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->skb = skb;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;

	/* make sure the descriptor is fully written before
	 * handing ownership to the hardware
	 */
	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, bufsize);
	return 0;
}

/* Free all unused buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		rd->control = 0;

		pci_unmap_single(hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(e->skb);
		e->skb = NULL;
	}
	ring->to_clean = e;
}

/* Allocate buffers for receive ring
 * For receive: to_use   is refill location
 *              to_clean is next received frame.
 *
 * if (to_use == to_clean)
 *	then all frames in ring need buffers
 * if (to_use->next == to_clean)
 *	then all frames in ring have buffers
 */
static int skge_rx_fill(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int ret = 0;

	for (e = ring->to_use; e->next != ring->to_clean; e = e->next) {
		if (skge_rx_alloc(skge, e)) {
			ret = 1;
			break;
		}
	}
	ring->to_use = e;

	return ret;
}

static void skge_link_up(struct skge_port *skge)
{
	netif_carrier_on(skge->netdev);
	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(skge->netdev);

	if (netif_msg_link(skge))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       skge->netdev->name, skge->speed,
		       skge->duplex == DUPLEX_FULL ? "full" : "half",
		       (skge->flow_control == FLOW_MODE_NONE) ? "none" :
		       (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
		       (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
		       (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
		       "unknown");
}

static void skge_link_down(struct skge_port *skge)
{
	netif_carrier_off(skge->netdev);
	netif_stop_queue(skge->netdev);

	if (netif_msg_link(skge))
		printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
}

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	v = xm_read16(hw, port, XM_PHY_DATA);
	if (hw->phy_type != SK_PHY_XMAC) {
		int i;

		for (i = 0; i < PHY_RETRIES; i++) {
			udelay(1);
			if (xm_read16(hw, port, XM_MMU_CMD)
			    & XM_MMU_PHY_RDY)
				goto ready;
		}

		printk(KERN_WARNING PFX "%s: phy read timed out\n",
		       hw->dev[port]->name);
		return 0;
 ready:
		v = xm_read16(hw, port, XM_PHY_DATA);
	}

	return v;
}

static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		udelay(1);
	}
	printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
	       hw->dev[port]->name);
	return;

 ready:
	xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return;
	}
	printk(KERN_WARNING PFX "%s: phy write timed out\n",
	       hw->dev[port]->name);
}

static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

static void genesis_reset(struct skge_hw *hw, int port)
{
	int i;
	u64 zero = 0;

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
	xm_write32(hw, port, XM_MODE, 0);	/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable all PHY IRQs */
	if (hw->phy_type == SK_PHY_BCOM)
		xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	xm_outhash(hw, port, XM_HSM, (u8 *) &zero);
	for (i = 0; i < 15; i++)
		xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero);
	xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero);
}

static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 r;
	u16 id1;
	u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* initialize Rx, Tx and Link LED */
	skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
	skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

	skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
	skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	spin_lock_bh(&hw->phy_lock);
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take PHY out of reset. */
		r = skge_read32(hw, B2_GP_IO);
		if (port == 0)
			r |= GP_DIR_0|GP_IO_0;
		else
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);
		skge_read32(hw, B2_GP_IO);

		/* Enable GMII mode on the XMAC. */
		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);

		id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

		/* Optimize MDIO transfer by suppressing preamble. */
		xm_write16(hw, port, XM_MMU_CMD,
			   xm_read16(hw, port, XM_MMU_CMD)
			   | XM_MMU_NO_PRE);

		if (id1 == PHY_BCOM_ID1_C0) {
			/*
			 * Workaround BCOM Errata for the C0 type.
			 * Write magic patterns to reserved registers.
			 */
			for (i = 0; i < ARRAY_SIZE(C0hack); i++)
				xm_phy_write(hw, port,
					     C0hack[i].reg, C0hack[i].val);
		} else if (id1 == PHY_BCOM_ID1_A1) {
			/*
			 * Workaround BCOM Errata for the A1 type.
			 * Write magic patterns to reserved registers.
			 */
			for (i = 0; i < ARRAY_SIZE(A1hack); i++)
				xm_phy_write(hw, port,
					     A1hack[i].reg, A1hack[i].val);
		}

		/*
		 * Workaround BCOM Errata (#10523) for all BCom PHYs.
		 * Disable Power Management after reset.
		 */
		r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM);
	}

	/* dummy read */
	xm_read16(hw, port, XM_ISRC);

	r = xm_read32(hw, port, XM_MODE);
	xm_write32(hw, port, XM_MODE, r|XM_MD_CSA);

	/* We don't need the FCS appended to the packet. */
	r = xm_read16(hw, port, XM_RX_CMD);
	xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS);

	/* We want short frames padded to 60 bytes. */
	r = xm_read16(hw, port, XM_TX_CMD);
	xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	r = xm_read32(hw, port, XM_MODE);
	xm_write32(hw, port, XM_MODE,
		   XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT|
		   XM_MD_RX_ERR|XM_MD_RX_IRLE);

	xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr);
	xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	xm_write16(hw, port, XM_TX_THR, 512);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	if (hw->dev[port]->mtu > ETH_DATA_LEN) {
		/* Enable frame flushing if jumbo frames used */
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
	} else {
		/* enable timeout timers if normal frames */
		skge_write16(hw, B3_PA_CTRL,
			     port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
	}

	r = xm_read16(hw, port, XM_RX_CMD);
	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK);
	else
		xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK));

	switch (hw->phy_type) {
	case SK_PHY_XMAC:
		if (skge->autoneg == AUTONEG_ENABLE) {
			ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD;

			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				ctrl1 |= PHY_X_P_NO_PAUSE;
				break;
			case FLOW_MODE_LOC_SEND:
				ctrl1 |= PHY_X_P_ASYM_MD;
				break;
			case FLOW_MODE_SYMMETRIC:
				ctrl1 |= PHY_X_P_SYM_MD;
				break;
			case FLOW_MODE_REM_SEND:
				ctrl1 |= PHY_X_P_BOTH_MD;
				break;
			}

			xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1);
			ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG;
		} else {
			ctrl2 = 0;
			if (skge->duplex == DUPLEX_FULL)
				ctrl2 |= PHY_CT_DUP_MD;
		}

		xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2);
		break;

	case SK_PHY_BCOM:
		ctrl1 = PHY_CT_SP1000;
		ctrl2 = 0;
		ctrl3 = PHY_AN_CSMA;
		ctrl4 = PHY_B_PEC_EN_LTR;
		ctrl5 = PHY_B_AC_TX_TST;

		if (skge->autoneg == AUTONEG_ENABLE) {
			/*
			 * Workaround BCOM Errata #1 for the C5 type.
			 * 1000Base-T Link Acquisition Failure in Slave Mode
			 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
			 */
			ctrl2 |= PHY_B_1000C_RD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ctrl2 |= PHY_B_1000C_AHD;
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ctrl2 |= PHY_B_1000C_AFD;

			/* Set Flow-control capabilities */
			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				ctrl3 |= PHY_B_P_NO_PAUSE;
				break;
			case FLOW_MODE_LOC_SEND:
				ctrl3 |= PHY_B_P_ASYM_MD;
				break;
			case FLOW_MODE_SYMMETRIC:
				ctrl3 |= PHY_B_P_SYM_MD;
				break;
			case FLOW_MODE_REM_SEND:
				ctrl3 |= PHY_B_P_BOTH_MD;
				break;
			}

			/* Restart Auto-negotiation */
			ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
		} else {
			if (skge->duplex == DUPLEX_FULL)
				ctrl1 |= PHY_CT_DUP_MD;

			ctrl2 |= PHY_B_1000C_MSE;	/* set it to Slave */
		}

		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2);
		xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3);

		if (skge->netdev->mtu > ETH_DATA_LEN) {
			ctrl4 |= PHY_B_PEC_HIGH_LA;
			ctrl5 |= PHY_B_AC_LONG_PACK;

			xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, ctrl5);
		}

		xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4);
		xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1);
		break;
	}
	spin_unlock_bh(&hw->phy_lock);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* Start polling for link status */
	mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
}

static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/*
	 * If the transfer gets stuck at the MAC the STOP command will not
	 * terminate if we don't flush the XMAC's transmit FIFO !
	 */
	xm_write32(hw, port, XM_MODE,
		   xm_read32(hw, port, XM_MODE)|XM_MD_FTF);

	/* Reset the MAC */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);

	/* For external PHYs there must be special handling */
	if (hw->phy_type != SK_PHY_XMAC) {
		u32 reg = skge_read32(hw, B2_GP_IO);

		/* put the PHY back into reset */
		if (port == 0) {
			reg |= GP_DIR_0;
			reg &= ~GP_IO_0;
		} else {
			reg |= GP_DIR_2;
			reg &= ~GP_IO_2;
		}
		skge_write32(hw, B2_GP_IO, reg);
		skge_read32(hw, B2_GP_IO);
	}

	xm_write16(hw, port, XM_MMU_CMD,
		   xm_read16(hw, port, XM_MMU_CMD)
		   & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	xm_read16(hw, port, XM_MMU_CMD);
}

static void genesis_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long timeout = jiffies + HZ;

	xm_write16(hw, port,
		   XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);

	/* wait for update to complete */
	while (xm_read16(hw, port, XM_STAT_CMD)
	       & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
		if (time_after(jiffies, timeout))
			break;
	}

	/* special case for 64 bit octet counter */
	data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
		| xm_read32(hw, port, XM_TXO_OK_LO);
	data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
		| xm_read32(hw, port, XM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
}

static void genesis_mac_intr(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 status = xm_read16(hw, port, XM_ISRC);

	pr_debug("genesis_intr status %x\n", status);
	if (hw->phy_type == SK_PHY_XMAC) {
		/* Link down, start polling for state change */
		if (status & XM_IS_INP_ASS) {
			xm_write16(hw, port, XM_IMSK,
				   xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS);
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
		}
		else if (status & XM_IS_AND)
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
	}

	if (status & XM_IS_TXF_UR) {
		xm_write32(hw, port, XM_MODE, XM_MD_FTF);
		++skge->net_stats.tx_fifo_errors;
	}
	if (status & XM_IS_RXF_OV) {
		xm_write32(hw, port, XM_MODE, XM_MD_FRF);
		++skge->net_stats.rx_fifo_errors;
	}
}

static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);

		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			break;
	}
}

static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
			goto ready;
	}

	printk(KERN_WARNING PFX "%s: phy read timeout\n",
	       hw->dev[port]->name);
	return 0;
 ready:
	return gma_read16(hw, port, GM_SMI_DATA);
}

static void genesis_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	pr_debug("genesis_link_down\n");

	xm_write16(hw, port, XM_MMU_CMD,
		   xm_read16(hw, port, XM_MMU_CMD)
		   & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	/* dummy read to ensure writing */
	(void) xm_read16(hw, port, XM_MMU_CMD);

	skge_link_down(skge);
}

static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd;
	u32 mode, msk;

	pr_debug("genesis_link_up\n");
	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_control == FLOW_MODE_NONE ||
	    skge->flow_control == FLOW_MODE_LOC_SEND)
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
	    skge->flow_control == FLOW_MODE_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal or external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * disable pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	xm_write32(hw, port, XM_MODE, mode);

	msk = XM_DEF_MSK;
	if (hw->phy_type != SK_PHY_XMAC)
		msk |= XM_IS_INP_ASS;	/* disable GP0 interrupt bit */

	xm_write16(hw, port, XM_IMSK, msk);
	xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	if (hw->phy_type == SK_PHY_BCOM) {
		/*
		 * Workaround BCOM Errata (#10523) for all BCom Phys
		 * Enable Power Management after link up
		 */
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
			     & ~PHY_B_AC_DIS_PM);
		xm_phy_write(hw, port, PHY_BCOM_INT_MASK,
			     PHY_B_DEF_MSK);
	}

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
		   cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}

static void genesis_bcom_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 stat = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);

	pr_debug("genesis_bcom intr stat=%x\n", stat);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (stat & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl & ~PHY_CT_LOOP);
	}

	stat = xm_phy_read(hw, port, PHY_BCOM_STAT);
	if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) {
		u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
		if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev))
			genesis_link_down(skge);

		else if (stat & PHY_B_IS_LST_CHANGE) {
			if (aux & PHY_B_AS_AN_C) {
				switch (aux & PHY_B_AS_AN_RES_MSK) {
				case PHY_B_RES_1000FD:
					skge->duplex = DUPLEX_FULL;
					break;
				case PHY_B_RES_1000HD:
					skge->duplex = DUPLEX_HALF;
					break;
				}

				switch (aux & PHY_B_AS_PAUSE_MSK) {
				case PHY_B_AS_PAUSE_MSK:
					skge->flow_control = FLOW_MODE_SYMMETRIC;
					break;
				case PHY_B_AS_PRR:
					skge->flow_control = FLOW_MODE_REM_SEND;
					break;
				case PHY_B_AS_PRT:
					skge->flow_control = FLOW_MODE_LOC_SEND;
					break;
				default:
					skge->flow_control = FLOW_MODE_NONE;
				}
				skge->speed = SPEED_1000;
			}
			genesis_link_up(skge);
		}
		else
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
	}
}

/* Periodic poll of phy status to check for link transition */
static void skge_link_timer(unsigned long __arg)
{
	struct skge_port *skge = (struct skge_port *) __arg;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev))
		return;

	spin_lock_bh(&hw->phy_lock);
	if (hw->phy_type == SK_PHY_BCOM)
		genesis_bcom_intr(skge);
	else {
		int i;

		for (i = 0; i < 3; i++)
			if (xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
				break;

		if (i == 3)
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
		else
			genesis_link_up(skge);
	}
	spin_unlock_bh(&hw->phy_lock);
}

/* Marvell Phy Initialization */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;
	u16 ledctrl, ledover;

	pr_debug("yukon_init\n");
	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		/* on PHY 88E1111 there is a change for downshift control */
		if (hw->chip_id == CHIP_ID_YUKON_EC)
			ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA;
		else
			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (iscopper(hw)) {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			/* Set Flow-control capabilities */
			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				adv |= PHY_B_P_NO_PAUSE;
				break;
			case FLOW_MODE_LOC_SEND:
				adv |= PHY_B_P_ASYM_MD;
				break;
			case FLOW_MODE_SYMMETRIC:
				adv |= PHY_B_P_SYM_MD;
				break;
			case FLOW_MODE_REM_SEND:
				adv |= PHY_B_P_BOTH_MD;
				break;
			}
		} else {	/* special defines for FIBER (88E1011S only) */
			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;

			/* Set Flow-control capabilities */
			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				adv |= PHY_M_P_NO_PAUSE_X;
				break;
			case FLOW_MODE_LOC_SEND:
				adv |= PHY_M_P_ASYM_MD_X;
				break;
			case FLOW_MODE_SYMMETRIC:
				adv |= PHY_M_P_SYM_MD_X;
				break;
			case FLOW_MODE_REM_SEND:
				adv |= PHY_M_P_BOTH_MD_X;
				break;
			}
		}
		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	if (hw->chip_id != CHIP_ID_YUKON_FE)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	if (hw->chip_id == CHIP_ID_YUKON_FE) {
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR,
			     ((gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR)
			       & ~PHY_M_FELP_LED1_MSK)
			      | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL)));
	} else {
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;

		/* turn off the Rx LED (LED_RX) */
		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
	}

	/* disable blink mode (LED_DUPLEX) on collisions */
	ledctrl |= PHY_M_LEDC_DP_CTRL;
	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

	if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) {
		/* turn on 100 Mbps LED (LED_LINK100) */
		ledover |= PHY_M_LED_MO_100(MO_LED_ON);
	}

	if (ledover)
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}

static void yukon_reset(struct skge_hw *hw, int port)
{
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);	/* disable PHY IRQs */
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	gma_write16(hw, port, GM_RX_CTRL,
		    gma_read16(hw, port, GM_RX_CTRL)
		    | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}

static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev == CHIP_REV_YU_LITE_A3)
		skge_write32(hw, B2_GP_IO,
			     (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));

	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev == CHIP_REV_YU_LITE_A3)
		skge_write32(hw, B2_GP_IO,
			     (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
			     & ~GP_IO_9);

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
	if (skge->autoneg == AUTONEG_DISABLE) {
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
			    gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg |= GM_GPCR_SPEED_1000;
			/* fall thru */
		case SPEED_100:
			reg |= GM_GPCR_SPEED_100;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, GMAC_IRQ_SRC);

	spin_lock_bh(&hw->phy_lock);
	yukon_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* serial mode register */
	reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
	if (hw->dev[port]->mtu > 1500)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev == CHIP_REV_YU_LITE_A3)
		reg &= ~GMF_RX_F_FL_ON;
	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}

static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev == CHIP_REV_YU_LITE_A3) {
		skge_write32(hw, B2_GP_IO,
			     skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
	}

	gma_write16(hw, port, GM_GP_CTRL,
		    gma_read16(hw, port, GM_GP_CTRL)
		    & ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA));
	gma_read16(hw, port, GM_GP_CTRL);

	/* set GPHY Control reset */
	gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET);
	gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET);
}

static void yukon_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
		| gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
		| gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = gma_read32(hw, port,
				     skge_stats[i].gma_offset);
}

static void yukon_mac_intr(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	pr_debug("yukon_intr status %x\n", status);
	if (status & GM_IS_RX_FF_OR) {
		++skge->net_stats.rx_fifo_errors;
		gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO);
	}
	if (status & GM_IS_TX_FF_UR) {
		++skge->net_stats.tx_fifo_errors;
		gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU);
	}
}

static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
{
	if (hw->chip_id == CHIP_ID_YUKON_FE)
		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;

	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	pr_debug("yukon_link_up\n");

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
	skge_link_up(skge);
}

static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	pr_debug("yukon_link_down\n");
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
	gma_write16(hw, port, GM_GP_CTRL,
		    gma_read16(hw, port, GM_GP_CTRL)
		    & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));

	if (hw->chip_id != CHIP_ID_YUKON_FE &&
	    skge->flow_control == FLOW_MODE_REM_SEND) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
			     gm_phy_read(hw, port,
					 PHY_MARV_AUNE_ADV)
			     | PHY_M_AN_ASP);
	}

	yukon_reset(hw, port);
	skge_link_down(skge);

	yukon_init(hw, port);
}

static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
	pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_M_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC)
		    && (gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
			& PHY_B_1000S_MSF)) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* Tx & Rx Pause Enabled bits are at 9..8 */
		if (hw->chip_id == CHIP_ID_YUKON_XL)
			phystat >>= 6;

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_control = FLOW_MODE_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_control = FLOW_MODE_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_control = FLOW_MODE_LOC_SEND;
			break;
		default:
			skge->flow_control = FLOW_MODE_NONE;
		}

		if (skge->flow_control == FLOW_MODE_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;
failed:
	printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
	       skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}
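/*
 * Illustrative sketch, not part of the driver: the pause resolution above
 * follows IEEE 802.3z/D5.0 Table 37-4.  Both resolved bits set means
 * symmetric flow control, a single bit selects one direction, and neither
 * bit disables pause entirely.  This mirrors the switch statement above
 * and exists only to make the mapping explicit.
 */
#if 0	/* example only */
static int example_flow_mode(u16 phystat)
{
	switch (phystat & PHY_M_PS_PAUSE_MSK) {
	case PHY_M_PS_PAUSE_MSK:	/* Rx and Tx pause resolved */
		return FLOW_MODE_SYMMETRIC;
	case PHY_M_PS_RX_P_EN:		/* Rx pause only */
		return FLOW_MODE_REM_SEND;
	case PHY_M_PS_TX_P_EN:		/* Tx pause only */
		return FLOW_MODE_LOC_SEND;
	default:			/* no pause */
		return FLOW_MODE_NONE;
	}
}
#endif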
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queues */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2 * len) / 3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + len / 3);
	} else {
		/* Enable store & forward on Tx queues because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
/* Setup Bus Memory Interface */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
static int skge_up(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 chunk, ram_addr;
	size_t rx_size, tx_size;
	int err = -ENOMEM;

	if (netif_msg_ifup(skge))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
	skge->mem_size = tx_size + rx_size;
	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
	if (!skge->mem)
		return -ENOMEM;

	memset(skge->mem, 0, skge->mem_size);

	if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
		goto free_pci_mem;

	if (skge_rx_fill(skge))
		goto free_rx_ring;

	if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
				   skge->dma + rx_size)))
		goto free_rx_ring;

	skge->tx_avail = skge->tx_ring.count - 1;

	/* Initialize MAC */
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);

	/* Configure RAMbuffers */
	chunk = hw->ram_size / ((hw->ports + 1)*2);
	ram_addr = hw->ram_offset + 2 * chunk * port;

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);

	pr_debug("skge_up completed\n");
	return 0;

free_rx_ring:
	skge_rx_clean(skge);
	kfree(skge->rx_ring.start);
free_pci_mem:
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);

	return err;
}
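/*
 * Illustrative sketch, not part of the driver: worked example of the
 * RAMbuffer split above for a hypothetical single-port board with
 * ram_size = 0x100000 and ram_offset = 0:
 *
 *	chunk = 0x100000 / ((1 + 1) * 2) = 0x40000
 *	Rx queue window: [0x00000, 0x40000)
 *	Tx queue window: [0x40000, 0x80000)
 */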
static int skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (netif_msg_ifdown(skge))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);

	del_timer_sync(&skge->led_blink);
	del_timer_sync(&skge->link_check);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);

	/* stop receiver */
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_STOP);
		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_STOP);
	} else {
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	/* turn off led's */
	skge_write16(hw, B0_LED, LED_STAT_OFF);

	skge_tx_clean(skge);
	skge_rx_clean(skge);

	kfree(skge->rx_ring.start);
	kfree(skge->tx_ring.start);
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	return 0;
}
static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	struct skge_tx_desc *td;
	u32 control, len;
	u64 map;
	int i;
	unsigned long flags;

	skb = skb_padto(skb, ETH_ZLEN);
	if (!skb)
		return NETDEV_TX_OK;

	local_irq_save(flags);
	if (!spin_trylock(&skge->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&skge->tx_lock, flags);

		printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	e = ring->to_use;
	td = e->desc;
	e->skb = skb;

	len = skb_headlen(skb);
	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, len);

	td->dma_lo = map;
	td->dma_hi = map >> 32;

	if (skb->ip_summed == CHECKSUM_HW) {
		const struct iphdr *ip
			= (const struct iphdr *) (skb->data + ETH_HLEN);
		int offset = skb->h.raw - skb->data;

		/* This seems backwards, but it is what the sk98lin
		 * does.  Looks like hardware is wrong?
		 */
		if (ip->protocol == IPPROTO_UDP
		    && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
			control = BMU_TCP_CHECK;
		else
			control = BMU_UDP_CHECK;

		td->csum_start = offset;
		td->csum_write = offset + skb->csum;
	} else
		control = BMU_CHECK;

	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
		control |= BMU_EOF | BMU_IRQ_EOF;
	else {
		struct skge_tx_desc *tf = td;

		control |= BMU_STFWD;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);

			e = e->next;
			e->skb = NULL;
			tf = e->desc;
			tf->dma_lo = map;
			tf->dma_hi = (u64) map >> 32;
			pci_unmap_addr_set(e, mapaddr, map);
			pci_unmap_len_set(e, maplen, frag->size);

			tf->control = BMU_OWN | BMU_SW | control | frag->size;
		}
		tf->control |= BMU_EOF | BMU_IRQ_EOF;
	}

	/* Make sure all the descriptors written */
	wmb();
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	if (netif_msg_tx_queued(skge))
		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
		       dev->name, e - ring->start, skb->len);

	ring->to_use = e->next;
	skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
	if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&skge->tx_lock, flags);

	return NETDEV_TX_OK;
}
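/*
 * Illustrative sketch, not part of the driver: the descriptor handoff
 * protocol used above.  Every field of a descriptor must be globally
 * visible before BMU_OWN passes it to the hardware, hence the write
 * barriers around the final control-word store:
 */
#if 0	/* example only */
	/* ...fill dma_lo/dma_hi and checksum fields first... */
	wmb();		/* payload fields visible before ownership flips */
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();		/* ownership visible before the BMU is kicked */
	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
#endif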
static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
{
	/* This ring element holds either the skb head or a fragment */
	if (e->skb) {
		pci_unmap_single(hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(e->skb);
		e->skb = NULL;
	} else
		pci_unmap_page(hw->pdev,
			       pci_unmap_addr(e, mapaddr),
			       pci_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);
}
static void skge_tx_clean(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	unsigned long flags;

	spin_lock_irqsave(&skge->tx_lock, flags);
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		++skge->tx_avail;
		skge_tx_free(skge->hw, e);
	}
	ring->to_clean = e;
	spin_unlock_irqrestore(&skge->tx_lock, flags);
}
static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_timer(skge))
		printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(skge);
}
static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}

	return 0;
}
static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i, count = dev->mc_count;
	struct dev_mc_list *list = dev->mc_list;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));
		for (i = 0; list && i < count; i++, list = list->next) {
			u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN);
			u8 bit = 63 - (crc & 63);

			filter[bit/8] |= 1 << (bit%8);
		}
	}

	xm_outhash(hw, port, XM_HSM, filter);

	xm_write32(hw, port, XM_MODE, mode);
}
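/*
 * Illustrative sketch, not part of the driver: the XMAC hash table is 64
 * bits wide and indexed from the little-endian CRC of the address.  For
 * a hypothetical CRC whose low six bits are 0x05, the selected bit is
 * 63 - 5 = 58, i.e. bit 2 of filter[7]:
 */
#if 0	/* example only */
	u32 crc = crc32_le(~0, addr, ETH_ALEN);	/* 'addr' is hypothetical */
	u8 bit = 63 - (crc & 63);		/* crc & 63 == 5 -> bit 58 */

	filter[bit / 8] |= 1 << (bit % 8);	/* filter[7] |= 0x04 */
#endif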
static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)		/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0)		/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;

		reg |= GM_RXCR_MCF_ENA;

		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
	}

	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16)filter[0] | ((u16)filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16)filter[2] | ((u16)filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16)filter[4] | ((u16)filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16)filter[6] | ((u16)filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
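/*
 * Illustrative sketch, not part of the driver: the GMAC loads its 64-bit
 * hash through four 16-bit registers, low byte first, which is why each
 * register above packs two filter[] bytes.  E.g. with hypothetical values
 * filter[0] = 0x34 and filter[1] = 0x12:
 *
 *	GM_MC_ADDR_H1 = (u16)0x34 | ((u16)0x12 << 8) = 0x1234
 */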
static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;

	return (status & GMR_FS_ANY_ERR) ||
		(status & GMR_FS_RX_OK) == 0;
}
static void skge_rx_error(struct skge_port *skge, int slot,
			  u32 control, u32 status)
{
	if (netif_msg_rx_err(skge))
		printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
		       skge->netdev->name, slot, control, status);

	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
	    || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
		skge->net_stats.rx_length_errors++;
	else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			skge->net_stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			skge->net_stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			skge->net_stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			skge->net_stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			skge->net_stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			skge->net_stats.rx_crc_errors++;
	}
}
static int skge_poll(struct net_device *dev, int *budget)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	unsigned int to_do = min(dev->quota, *budget);
	unsigned int work_done = 0;
	int done;
	static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 };

	for (e = ring->to_clean; e != ring->to_use && work_done < to_do;
	     e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb = e->skb;
		u32 control, len, status;

		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		len = control & BMU_BBC;
		e->skb = NULL;

		pci_unmap_single(hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);

		status = rd->status;
		if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
		     || len > dev->mtu + VLAN_ETH_HLEN
		     || bad_phy_status(hw, status)) {
			skge_rx_error(skge, e - ring->start, control, status);
			dev_kfree_skb(skb);
			continue;
		}

		if (netif_msg_rx_status(skge))
			printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
			       dev->name, e - ring->start, rd->status, len);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);

		if (skge->rx_csum) {
			skb->csum = le16_to_cpu(rd->csum2);
			skb->ip_summed = CHECKSUM_HW;
		}

		dev->last_rx = jiffies;
		netif_receive_skb(skb);

		++work_done;
	}
	ring->to_clean = e;

	*budget -= work_done;
	dev->quota -= work_done;
	done = work_done < to_do;

	if (skge_rx_fill(skge))
		done = 0;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
		    CSR_START | CSR_IRQ_CL_F);

	if (done) {
		local_irq_disable();
		hw->intr_mask |= irqmask[skge->port];
		/* Order is important since data can get interrupted */
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();
	}

	return !done;
}
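/*
 * Illustrative sketch, not part of the driver: NAPI budget accounting.
 * A poll routine may consume at most min(dev->quota, *budget) packets,
 * must charge both counters, and returns nonzero only while work remains
 * so the core keeps polling instead of re-enabling the interrupt:
 */
#if 0	/* example only */
	unsigned int to_do = min(dev->quota, *budget);
	/* ...process up to to_do packets, counting them in work_done... */
	*budget -= work_done;
	dev->quota -= work_done;
	return work_done >= to_do;	/* nonzero: poll again */
#endif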
static inline void skge_tx_intr(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;

	spin_lock(&skge->tx_lock);
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		u32 control;

		rmb();
		control = td->control;
		if (control & BMU_OWN)
			break;

		if (unlikely(netif_msg_tx_done(skge)))
			printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
			       dev->name, e - ring->start, td->status);

		skge_tx_free(hw, e);
		++skge->tx_avail;
	}
	ring->to_clean = e;
	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(dev);

	spin_unlock(&skge->tx_lock);
}
static void skge_mac_parity(struct skge_hw *hw, int port)
{
	printk(KERN_ERR PFX "%s: mac data parity error\n",
	       hw->dev[port] ? hw->dev[port]->name
	       : (port == 0 ? "(port A)" : "(port B)"));

	if (hw->chip_id == CHIP_ID_GENESIS)
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
static void skge_pci_clear(struct skge_hw *hw)
{
	u16 status;

	pci_read_config_word(hw->pdev, PCI_STATUS, &status);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
static void skge_mac_intr(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_intr(hw, port);
	else
		yukon_mac_intr(hw, port);
}
/* Handle device specific framing and timeout interrupts */
static void skge_error_irq(struct skge_hw *hw)
{
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

		if (hwstatus & IS_IRQ_SENSOR) {
			/* no sensors on 32-bit Yukon */
			if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
				printk(KERN_ERR PFX "ignoring bogus sensor interrupts\n");
				skge_write32(hw, B0_HWE_IMSK,
					     IS_ERR_MSK & ~IS_IRQ_SENSOR);
			} else
				printk(KERN_WARNING PFX "sensor interrupt\n");
		}
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		printk(KERN_ERR PFX "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		printk(KERN_ERR PFX "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR)
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);

	if (hwstatus & IS_R2_PAR_ERR)
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
		       hwstatus);

		skge_pci_clear(hw);

		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			printk(KERN_WARNING PFX "IRQ status %x: still set ignoring hardware errors\n",
			       hwstatus);
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
/*
 * Interrupts from the PHY are handled in a tasklet (soft irq)
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void skge_extirq(unsigned long data)
{
	struct skge_hw *hw = (struct skge_hw *) data;
	int port;

	spin_lock(&hw->phy_lock);
	for (port = 0; port < 2; port++) {
		struct net_device *dev = hw->dev[port];

		if (dev && netif_running(dev)) {
			struct skge_port *skge = netdev_priv(dev);

			if (hw->chip_id != CHIP_ID_GENESIS)
				yukon_phy_intr(skge);
			else if (hw->phy_type == SK_PHY_BCOM)
				genesis_bcom_intr(skge);
		}
	}
	spin_unlock(&hw->phy_lock);

	local_irq_disable();
	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	local_irq_enable();
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct skge_hw *hw = dev_id;
	u32 status = skge_read32(hw, B0_SP_ISRC);

	if (status == 0 || status == ~0) /* hotplug or shared irq */
		return IRQ_NONE;

	status &= hw->intr_mask;

	if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) {
		hw->intr_mask &= ~IS_R1_F;
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_schedule(hw->dev[0]);
	}

	if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) {
		hw->intr_mask &= ~IS_R2_F;
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_schedule(hw->dev[1]);
	}

	if (status & IS_XA1_F)
		skge_tx_intr(hw->dev[0]);

	if (status & IS_XA2_F)
		skge_tx_intr(hw->dev[1]);

	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	if (status & IS_MAC2)
		skge_mac_intr(hw, 1);

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	if (status & IS_EXT_REG) {
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->ext_tasklet);
	}

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	return IRQ_HANDLED;
}
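/*
 * Illustrative sketch, not part of the driver: the NAPI handshake above.
 * The handler masks a port's Rx source in B0_IMSK before scheduling the
 * poll, and skge_poll() restores the port bits only after the ring is
 * drained, so interrupt and poll contexts never process the same ring:
 *
 *	irq:  intr_mask &= ~IS_R1_F; write B0_IMSK; __netif_rx_schedule()
 *	poll: drain ring; intr_mask |= IS_PORT_1; write B0_IMSK;
 *	      __netif_rx_complete()
 */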
#ifdef CONFIG_NET_POLL_CONTROLLER
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw, NULL);
	enable_irq(dev->irq);
}
#endif
static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
		    dev->dev_addr, ETH_ALEN);
	if (dev->flags & IFF_UP) {
		/* restart the interface so the new address takes effect */
		skge_down(dev);
		err = skge_up(dev);
	}

	return err;
}
static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite"},
	{ CHIP_ID_YUKON_LP,	"Yukon-LP"},
	{ CHIP_ID_YUKON_XL,	"Yukon-2 XL"},
	{ CHIP_ID_YUKON_EC,	"Yukon-2 EC"},
	{ CHIP_ID_YUKON_FE,	"Yukon-2 FE"},
};
static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}
/*
 * Setup the board data structure, but don't bring up
 * the ports (that is done later by skge_up)
 */
static int skge_reset(struct skge_hw *hw)
{
	u16 ctst;
	u8 t8, mac_cfg;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_pci_clear(hw);

	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	hw->pmd_type = skge_read8(hw, B2_PMD_TYP);

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
			       pci_name(hw->pdev), hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S')
			hw->phy_type = SK_PHY_MARV_COPPER;

		hw->phy_addr = PHY_ADDR_MARV;
		if (hw->pmd_type == 'S')
			hw->phy_type = SK_PHY_MARV_FIBER;
		break;

	default:
		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
		       pci_name(hw->pdev), hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapters RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
	if (hw->ports > 1)
		hw->intr_mask |= IS_PORT_2;
	skge_write32(hw, B0_IMSK, hw->intr_mask);

	if (hw->chip_id != CHIP_ID_GENESIS)
		skge_write8(hw, GMAC_IRQ_MSK, 0);

	spin_lock_bh(&hw->phy_lock);
	for (i = 0; i < hw->ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}
	spin_unlock_bh(&hw->phy_lock);

	return 0;
}
/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		printk(KERN_ERR "skge etherdev alloc failed");
		return NULL;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->open = skge_up;
	dev->stop = skge_down;
	dev->hard_start_xmit = skge_xmit_frame;
	dev->get_stats = skge_get_stats;
	if (hw->chip_id == CHIP_ID_GENESIS)
		dev->set_multicast_list = genesis_set_multicast;
	else
		dev->set_multicast_list = yukon_set_multicast;

	dev->set_mac_address = skge_set_mac_address;
	dev->change_mtu = skge_change_mtu;
	SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
	dev->tx_timeout = skge_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->poll = skge_poll;
	dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = skge_netpoll;
#endif
	dev->irq = hw->pdev->irq;
	dev->features = NETIF_F_LLTX;
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);
	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYMMETRIC;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_modes(hw);

	hw->dev[port] = dev;

	skge->port = port;

	spin_lock_init(&skge->tx_lock);

	init_timer(&skge->link_check);
	skge->link_check.function = skge_link_timer;
	skge->link_check.data = (unsigned long) skge;

	init_timer(&skge->led_blink);
	skge->led_blink.function = skge_blink_timer;
	skge->led_blink.data = (unsigned long) skge;

	if (hw->chip_id != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		skge->rx_csum = 1;
	}

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}
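/*
 * Illustrative sketch, not part of the driver: alloc_etherdev(priv_size)
 * places the private structure directly behind struct net_device, and
 * netdev_priv() recovers it, so no extra allocation or lookup table is
 * needed:
 */
#if 0	/* example only */
	struct net_device *dev = alloc_etherdev(sizeof(struct skge_port));
	struct skge_port *skge = netdev_priv(dev);	/* private area */

	skge->netdev = dev;				/* back pointer */
#endif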
static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_probe(skge))
		printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
static int __devinit skge_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
		using_dac = 1;
	else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	hw = kmalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	memset(hw, 0, sizeof(*hw));
	hw->pdev = pdev;
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		goto err_out_free_hw;
	}

	if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
		       pci_name(pdev), pdev->irq);
		goto err_out_iounmap;
	}
	pci_set_drvdata(pdev, hw);

	err = skge_reset(hw);
	if (err)
		goto err_out_free_irq;

	printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
	       pci_resource_start(pdev, 0), pdev->irq,
	       skge_board_name(hw), hw->chip_rev);

	err = -ENOMEM;
	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
		goto err_out_led_off;

	if ((err = register_netdev(dev))) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
		if (register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			printk(KERN_WARNING PFX "register of second port failed\n");
			hw->dev[1] = NULL;
			free_netdev(dev1);
		}
	}

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_free_irq:
	free_irq(pdev->irq, hw);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	if ((dev1 = hw->dev[1]))
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_kill(&hw->ext_tasklet);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);
	skge_write16(hw, B0_LED, LED_STAT_OFF);
	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}
static int skge_suspend(struct pci_dev *pdev, u32 state)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			struct skge_port *skge = netdev_priv(dev);
			if (netif_running(dev)) {
				netif_carrier_off(dev);
				skge_down(dev);
			}
			netif_device_detach(dev);
			wol |= skge->wol;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, state, wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int skge_resume(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	skge_reset(hw);

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			netif_device_attach(dev);
			if (netif_running(dev))
				skge_up(dev);
		}
	}
	return 0;
}
static struct pci_driver skge_driver = {
	.name =		DRV_NAME,
	.id_table =	skge_id_table,
	.probe =	skge_probe,
	.remove =	__devexit_p(skge_remove),
	.suspend =	skge_suspend,
	.resume =	skge_resume,
};
static int __init skge_init_module(void)
{
	return pci_module_init(&skge_driver);
}

static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
}

module_init(skge_init_module);
module_exit(skge_cleanup_module);