/*
 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 *
 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 * Copyright (c) a lot of people too. Please respect their work.
 *
 * See MAINTAINERS file for support contact information.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/pci-aspm.h>
#include <linux/prefetch.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>

#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
#define PFX MODULENAME ": "

#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"

#ifdef RTL8169_DEBUG
#define assert(expr) \
	if (!(expr)) {				\
		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
		#expr,__FILE__,__func__,__LINE__);		\
	}
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...)	do {} while (0)
#endif /* RTL8169_DEBUG */

#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

#define TX_BUFFS_AVAIL(tp) \
	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

#define MAX_READ_REQUEST_SHIFT	12
#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
#define RX_BUF_SIZE	1536	/* Rx Buffer size */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

#define RTL_EEPROM_SIG		cpu_to_le32(0x8129)
#define RTL_EEPROM_SIG_MASK	cpu_to_le32(0xffff)
#define RTL_EEPROM_SIG_ADDR	0x0000

/* write/read MMIO register */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		readl (ioaddr + (reg))
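/*
 * Note: the RTL_W8/16/32 and RTL_R8/16/32 accessors above expect a local
 * variable named "ioaddr" (the mapped MMIO base) to be in scope at every
 * call site.
 */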

enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0,
	RTL_GIGA_MAC_VER_02,
	RTL_GIGA_MAC_VER_03,
	RTL_GIGA_MAC_VER_04,
	RTL_GIGA_MAC_VER_05,
	RTL_GIGA_MAC_VER_06,
	RTL_GIGA_MAC_VER_07,
	RTL_GIGA_MAC_VER_08,
	RTL_GIGA_MAC_VER_09,
	RTL_GIGA_MAC_VER_10,
	RTL_GIGA_MAC_VER_11,
	RTL_GIGA_MAC_VER_12,
	RTL_GIGA_MAC_VER_13,
	RTL_GIGA_MAC_VER_14,
	RTL_GIGA_MAC_VER_15,
	RTL_GIGA_MAC_VER_16,
	RTL_GIGA_MAC_VER_17,
	RTL_GIGA_MAC_VER_18,
	RTL_GIGA_MAC_VER_19,
	RTL_GIGA_MAC_VER_20,
	RTL_GIGA_MAC_VER_21,
	RTL_GIGA_MAC_VER_22,
	RTL_GIGA_MAC_VER_23,
	RTL_GIGA_MAC_VER_24,
	RTL_GIGA_MAC_VER_25,
	RTL_GIGA_MAC_VER_26,
	RTL_GIGA_MAC_VER_27,
	RTL_GIGA_MAC_VER_28,
	RTL_GIGA_MAC_VER_29,
	RTL_GIGA_MAC_VER_30,
	RTL_GIGA_MAC_VER_31,
	RTL_GIGA_MAC_VER_32,
	RTL_GIGA_MAC_VER_33,
	RTL_GIGA_MAC_VER_34,
	RTL_GIGA_MAC_VER_35,
	RTL_GIGA_MAC_VER_36,
	RTL_GIGA_MAC_NONE   = 0xff,
};

enum rtl_tx_desc_version {
	RTL_TD_0	= 0,
	RTL_TD_1	= 1,
};

#define JUMBO_1K	ETH_DATA_LEN
#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)

#define _R(NAME,TD,FW,SZ,B) {	\
	.name = NAME,		\
	.txd_version = TD,	\
	.fw_name = FW,		\
	.jumbo_max = SZ,	\
	.jumbo_tx_csum = B	\
}

static const struct {
	const char *name;
	enum rtl_tx_desc_version txd_version;
	const char *fw_name;
	u16 jumbo_max;
	bool jumbo_tx_csum;
} rtl_chip_infos[] = {
	/* PCI devices. */
	[RTL_GIGA_MAC_VER_01] =
		_R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_02] =
		_R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_03] =
		_R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_04] =
		_R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_05] =
		_R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_06] =
		_R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
	/* PCI-E devices. */
	[RTL_GIGA_MAC_VER_07] =
		_R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_08] =
		_R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_09] =
		_R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_10] =
		_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_11] =
		_R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_12] =
		_R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_13] =
		_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_14] =
		_R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_15] =
		_R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_16] =
		_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_17] =
		_R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_18] =
		_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_19] =
		_R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_20] =
		_R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_21] =
		_R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_22] =
		_R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_23] =
		_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_24] =
		_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_25] =
		_R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_26] =
		_R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_27] =
		_R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_28] =
		_R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_29] =
		_R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_30] =
		_R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_31] =
		_R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_32] =
		_R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_33] =
		_R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_34] =
		_R("RTL8168evl/8111evl", RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_35] =
		_R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_36] =
		_R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K, false),
};
#undef _R

enum cfg_version {
	RTL_CFG_0 = 0x00,
	RTL_CFG_1,
	RTL_CFG_2
};

static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);

static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{0,},
};

MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);

static int rx_buf_sz = 16383;
static int use_dac;
static struct {
	u32 msg_enable;
} debug = { -1 };

enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,

	TxConfig	= 0x40,
#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

	RxConfig	= 0x44,
#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define	RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define	RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

	RxMissed	= 0x4c,
	Cfg9346		= 0x50,
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,
	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27

	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	FuncForceEvent	= 0xfc,
};

enum rtl8110_registers {
	TBICSR			= 0x64,
	TBI_ANAR		= 0x68,
	TBI_LPAR		= 0x6a,
};

enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0f
#define	CSIAR_BYTE_ENABLE_SHIFT		12
#define	CSIAR_ADDR_MASK			0x0fff
	PMCH			= 0x6f,
	EPHYAR			= 0x80,
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DLLPR			= 0xd0,
#define	PFM_EN				(1 << 6)
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	TWSI			= 0xd2,
	MCU			= 0xd3,
#define	NOW_IS_OOB			(1 << 7)
#define	EN_NDP				(1 << 3)
#define	EN_OOB_RESET			(1 << 2)
	EFUSEAR			= 0xdc,
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
};

enum rtl8168_registers {
	LED_FREQ		= 0x1a,
	EEE_LED			= 0x1b,
	ERIDR			= 0x70,
	ERIAR			= 0x74,
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
	EPHY_RXER_NUM		= 0x7c,
	OCPDR			= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
	OCPAR			= 0xb4,
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC			= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define PWM_EN				(1 << 22)
};

enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxBOVF	= (1 << 24),
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	StopReq		= 0x80,
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	Spi_en		= (1 << 3),
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
	INTT_0		= 0x0000,	// 8168
	INTT_1		= 0x0001,	// 8168
	INTT_2		= 0x0002,	// 8168
	INTT_3		= 0x0003,	// 8168

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* DumpCounterCommand */
	CounterDump	= 0x8,
};

enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
};

/* Generic case. */
enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO		= (1 << 27),		/* Large Send Offload */
#define TD_MSS_MAX			0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
};

/* 8169, 8168b and 810x except 8102e. */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
};

/* 8102e, 8168c and beyond. */
enum rtl_tx_desc_bit_1 {
	/* Second doubleword. */
#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
	TD1_IP_CS	= (1 << 29),		/* Calculate IP checksum */
	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
};

static const struct rtl_tx_desc_info {
	struct {
		u32 udp;
		u32 tcp;
	} checksum;
	u16 mss_shift;
	u16 opts_offset;
} tx_desc_info [] = {
	[RTL_TD_0] = {
		.checksum = {
			.udp	= TD0_IP_CS | TD0_UDP_CS,
			.tcp	= TD0_IP_CS | TD0_TCP_CS
		},
		.mss_shift	= TD0_MSS_SHIFT,
		.opts_offset	= 0
	},
	[RTL_TD_1] = {
		.checksum = {
			.udp	= TD1_IP_CS | TD1_UDP_CS,
			.tcp	= TD1_IP_CS | TD1_TCP_CS
		},
		.mss_shift	= TD1_MSS_SHIFT,
		.opts_offset	= 1
	}
};

enum rtl_rx_desc_bit {
	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};

#define RsvdMask	0x3fffc000

struct TxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

struct RxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

struct ring_info {
	struct sk_buff	*skb;
	u32		len;
	u8		__pad[sizeof(void *) - sizeof(u32)];
};

enum features {
	RTL_FEATURE_WOL		= (1 << 0),
	RTL_FEATURE_MSI		= (1 << 1),
	RTL_FEATURE_GMII	= (1 << 2),
};

struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;
};

struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	u32 msg_enable;
	u16 txd_version;
	u16 mac_version;
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_rx;
	u32 dirty_tx;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;
	dma_addr_t RxPhyAddr;
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	struct timer_list timer;
	u16 cp_cmd;
	u16 intr_event;
	u16 napi_event;
	u16 intr_mask;

	struct mdio_ops {
		void (*write)(void __iomem *, int, int);
		int (*read)(void __iomem *, int);
	} mdio_ops;

	struct pll_power_ops {
		void (*down)(struct rtl8169_private *);
		void (*up)(struct rtl8169_private *);
	} pll_power_ops;

	struct jumbo_ops {
		void (*enable)(struct rtl8169_private *);
		void (*disable)(struct rtl8169_private *);
	} jumbo_ops;

	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(struct rtl8169_private *tp);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);

	struct {
		struct work_struct work;
	} wk;

	unsigned features;

	struct mii_if_info mii;
	struct rtl8169_counters counters;
	u32 saved_wolopts;
	u32 opts1_mask;

	struct rtl_fw {
		const struct firmware *fw;

#define RTL_VER_SIZE		32

		char version[RTL_VER_SIZE];

		struct rtl_fw_phy_action {
			__le32 *code;
			size_t size;
		} phy_action;
	} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)
};

MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param(use_dac, int, 0);
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
MODULE_VERSION(RTL8169_VERSION);
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);

static int rtl8169_open(struct net_device *dev);
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
static int rtl8169_init_ring(struct net_device *dev);
static void rtl_hw_start(struct net_device *dev);
static int rtl8169_close(struct net_device *dev);
static void rtl_set_rx_mode(struct net_device *dev);
static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
				void __iomem *, u32 budget);
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
static void rtl8169_down(struct net_device *dev);
static void rtl8169_rx_clear(struct rtl8169_private *tp);
static int rtl8169_poll(struct napi_struct *napi, int budget);

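/*
 * Clamp/force the PCIe Max_Read_Request_Size in the Device Control register;
 * this only takes effect when the device actually exposes a PCIe capability.
 */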
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
		ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}

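/*
 * Indirect register access through the OCP address/data pair (OCPAR/OCPDR).
 * Both helpers poll OCPAR_FLAG for completion and give up silently after
 * roughly 2ms; they are used below for the 8168dp out-of-band (DASH)
 * management interface.
 */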
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		if (RTL_R32(OCPAR) & OCPAR_FLAG)
			break;
	}
	return RTL_R32(OCPDR);
}

static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
			break;
	}
}

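/*
 * Hand a command byte to the embedded management firmware: the command is
 * written through ERIDR/ERIAR, then OCP register 0x30 is poked, apparently
 * to notify the firmware. The OOB_CMD_* values below are the known commands.
 */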
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);
	for (i = 0; i < 5; i++) {
		udelay(100);
		if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
			break;
	}

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}

#define OOB_CMD_RESET		0x00
#define OOB_CMD_DRIVER_START	0x05
#define OOB_CMD_DRIVER_STOP	0x06

static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
{
	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}

static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	u16 reg;
	int i;

	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	reg = rtl8168_get_ocp_reg(tp);

	for (i = 0; i < 10; i++) {
		msleep(10);
		if (ocp_read(tp, 0x0f, reg) & 0x00000800)
			break;
	}
}

static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	u16 reg;
	int i;

	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	reg = rtl8168_get_ocp_reg(tp);

	for (i = 0; i < 10; i++) {
		msleep(10);
		if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
			break;
	}
}

static int r8168dp_check_dash(struct rtl8169_private *tp)
{
	u16 reg = rtl8168_get_ocp_reg(tp);

	return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
}

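/*
 * Basic MII access through the PHYAR register: bit 31 is the write command /
 * completion flag, bits 20:16 select the PHY register, bits 15:0 carry the
 * data.
 */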
static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	int i;

	RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));

	for (i = 20; i > 0; i--) {
		/*
		 * Check if the RTL8169 has completed writing to the specified
		 * MII register.
		 */
		if (!(RTL_R32(PHYAR) & 0x80000000))
			break;
		udelay(25);
	}
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}

static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int i, value = -1;

	RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);

	for (i = 20; i > 0; i--) {
		/*
		 * Check if the RTL8169 has completed retrieving data from
		 * the specified MII register.
		 */
		if (RTL_R32(PHYAR) & 0x80000000) {
			value = RTL_R32(PHYAR) & 0xffff;
			break;
		}
		udelay(25);
	}
	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}

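/*
 * On the 8168dp the GPHY is reached indirectly through the OCP registers
 * (OCPDR/OCPAR) rather than through PHYAR; completion is polled for up to
 * 100ms.
 */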
static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
{
	int i;

	RTL_W32(OCPDR, data |
		((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	for (i = 0; i < 100; i++) {
		mdelay(1);
		if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
			break;
	}
}

static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
		(value & OCPDR_DATA_MASK));
}

static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int i;

	r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	for (i = 0; i < 100; i++) {
		mdelay(1);
		if (RTL_R32(OCPAR) & OCPAR_FLAG)
			break;
	}

	return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
}

#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000

static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}

static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}

static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(ioaddr, reg_addr, value);

	r8168dp_2_mdio_stop(ioaddr);
}

static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(ioaddr, reg_addr);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}

static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp->mmio_addr, location, val);
}

static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp->mmio_addr, location);
}

static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}

static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}

static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}

static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}

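/* PCIe EPHY parameter access through the EPHYAR address/data register. */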
static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
{
	unsigned int i;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
			break;
		udelay(10);
	}
}

static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
{
	u16 value = 0xffff;
	unsigned int i;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
			value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
			break;
		}
		udelay(10);
	}

	return value;
}

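/* CSI: indirect register access through the CSIAR/CSIDR register pair. */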
static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
{
	unsigned int i;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
			break;
		udelay(10);
	}
}

static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
{
	u32 value = ~0x00;
	unsigned int i;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
			value = RTL_R32(CSIDR);
			break;
		}
		udelay(10);
	}

	return value;
}

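/*
 * ERI: indirect access to the extended (EXGMAC/MSIX/ASF) register banks via
 * ERIAR/ERIDR. The mask argument selects which bytes of the 32-bit value are
 * actually written.
 */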
static
void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
{
	unsigned int i;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
			break;
		udelay(100);
	}
}

static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
{
	u32 value = ~0x00;
	unsigned int i;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(ERIAR) & ERIAR_FLAG) {
			value = RTL_R32(ERIDR);
			break;
		}
		udelay(100);
	}

	return value;
}

static void
rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
{
	u32 val;

	val = rtl_eri_read(ioaddr, addr, type);
	rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
}

struct exgmac_reg {
	u16 addr;
	u16 mask;
	u32 val;
};

static void rtl_write_exgmac_batch(void __iomem *ioaddr,
				   const struct exgmac_reg *r, int len)
{
	while (len-- > 0) {
		rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
		r++;
	}
}

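/* Read one byte from the 8168d eFuse through EFUSEAR; 0xff means timeout. */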
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
	u8 value = 0xff;
	unsigned int i;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	for (i = 0; i < 300; i++) {
		if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
			value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
			break;
		}
		udelay(100);
	}

	return value;
}

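/*
 * Thin wrappers around the IntrStatus/IntrMask registers. The mmiowb()
 * calls keep the posted MMIO writes ordered, presumably with respect to the
 * locks held by the callers.
 */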
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}

static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}

static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}

static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}

static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, tp->intr_event);
	RTL_R8(ChipCmd);
}

static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}

static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}

static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}

static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}

static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}

static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
{
	unsigned int val;

	val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
	rtl_writephy(tp, MII_BMCR, val & 0xffff);
}

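/*
 * Rewrite a pair of EXGMAC parameters (ERI 0x1bc/0x1dc) according to the
 * negotiated link speed on the 8168evl/8168f class chips (VER_34..VER_36);
 * the exact meaning of the values is not documented here. On VER_34 the
 * packet filter is also reset afterwards.
 */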
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
	}
}

static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
	spin_unlock_irqrestore(&tp->lock, flags);
}

static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}

#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)

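/*
 * Translate the wake-up bits in Config1/Config3/Config5 into the ethtool
 * WAKE_* flags (and back again in __rtl8169_set_wol below).
 */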
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}

static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	spin_unlock_irq(&tp->lock);
}

static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_ANY,   Config1, PMEnable },
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}

static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);
	spin_unlock_irq(&tp->lock);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}

static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}

static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}

static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}

static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int ret = 0;
	u32 reg;

	reg = RTL_R32(TBICSR);
	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
	    (duplex == DUPLEX_FULL)) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else if (autoneg == AUTONEG_ENABLE)
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	else {
		netif_warn(tp, link, dev,
			   "incorrect speed setting refused in TBI mode\n");
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}

static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}

static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	del_timer_sync(&tp->timer);

	spin_lock_irqsave(&tp->lock, flags);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	spin_unlock_irqrestore(&tp->lock, flags);

	return ret;
}

static netdev_features_t rtl8169_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (dev->mtu > TD_MSS_MAX)
		features &= ~NETIF_F_ALL_TSO;

	if (dev->mtu > JUMBO_1K &&
	    !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
		features &= ~NETIF_F_IP_CSUM;

	return features;
}

static int rtl8169_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	if (features & NETIF_F_RXCSUM)
		tp->cp_cmd |= RxChkSum;
	else
		tp->cp_cmd &= ~RxChkSum;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		tp->cp_cmd |= RxVlan;
	else
		tp->cp_cmd &= ~RxVlan;

	RTL_W16(CPlusCmd, tp->cp_cmd);
	RTL_R16(CPlusCmd);

	spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}

static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return (vlan_tx_tag_present(skb)) ?
		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}

static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}

static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}

static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}

static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&tp->lock, flags);

	rc = tp->get_settings(dev, cmd);

	spin_unlock_irqrestore(&tp->lock, flags);
	return rc;
}

static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}

static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};

static int rtl8169_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rtl8169_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

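/*
 * Ask the chip to DMA its hardware tally counters into a temporary coherent
 * buffer and cache them in tp->counters; silently skipped when the receiver
 * is disabled, since some chips cannot dump counters in that state.
 */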
355423d0 1745static void rtl8169_update_counters(struct net_device *dev)
d4a3a0fc
SH
1746{
1747 struct rtl8169_private *tp = netdev_priv(dev);
1748 void __iomem *ioaddr = tp->mmio_addr;
cecb5fd7 1749 struct device *d = &tp->pci_dev->dev;
d4a3a0fc
SH
1750 struct rtl8169_counters *counters;
1751 dma_addr_t paddr;
1752 u32 cmd;
355423d0 1753 int wait = 1000;
d4a3a0fc 1754
355423d0
IV
1755 /*
1756 * Some chips are unable to dump tally counters when the receiver
1757 * is disabled.
1758 */
1759 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1760 return;
d4a3a0fc 1761
48addcc9 1762 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
d4a3a0fc
SH
1763 if (!counters)
1764 return;
1765
1766 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
284901a9 1767 cmd = (u64)paddr & DMA_BIT_MASK(32);
d4a3a0fc
SH
1768 RTL_W32(CounterAddrLow, cmd);
1769 RTL_W32(CounterAddrLow, cmd | CounterDump);
1770
355423d0
IV
1771 while (wait--) {
1772 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
355423d0 1773 memcpy(&tp->counters, counters, sizeof(*counters));
d4a3a0fc 1774 break;
355423d0
IV
1775 }
1776 udelay(10);
d4a3a0fc
SH
1777 }
1778
1779 RTL_W32(CounterAddrLow, 0);
1780 RTL_W32(CounterAddrHigh, 0);
1781
48addcc9 1782 dma_free_coherent(d, sizeof(*counters), counters, paddr);
d4a3a0fc
SH
1783}
1784
355423d0
IV
1785static void rtl8169_get_ethtool_stats(struct net_device *dev,
1786 struct ethtool_stats *stats, u64 *data)
1787{
1788 struct rtl8169_private *tp = netdev_priv(dev);
1789
1790 ASSERT_RTNL();
1791
1792 rtl8169_update_counters(dev);
1793
1794 data[0] = le64_to_cpu(tp->counters.tx_packets);
1795 data[1] = le64_to_cpu(tp->counters.rx_packets);
1796 data[2] = le64_to_cpu(tp->counters.tx_errors);
1797 data[3] = le32_to_cpu(tp->counters.rx_errors);
1798 data[4] = le16_to_cpu(tp->counters.rx_missed);
1799 data[5] = le16_to_cpu(tp->counters.align_errors);
1800 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1801 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1802 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1803 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1804 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1805 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1806 data[12] = le16_to_cpu(tp->counters.tx_underun);
1807}
1808
d4a3a0fc
SH
1809static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1810{
 1811 switch (stringset) {
1812 case ETH_SS_STATS:
1813 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1814 break;
1815 }
1816}
1817
7282d491 1818static const struct ethtool_ops rtl8169_ethtool_ops = {
1da177e4
LT
1819 .get_drvinfo = rtl8169_get_drvinfo,
1820 .get_regs_len = rtl8169_get_regs_len,
1821 .get_link = ethtool_op_get_link,
1822 .get_settings = rtl8169_get_settings,
1823 .set_settings = rtl8169_set_settings,
b57b7e5a
SH
1824 .get_msglevel = rtl8169_get_msglevel,
1825 .set_msglevel = rtl8169_set_msglevel,
1da177e4 1826 .get_regs = rtl8169_get_regs,
61a4dcc2
FR
1827 .get_wol = rtl8169_get_wol,
1828 .set_wol = rtl8169_set_wol,
d4a3a0fc 1829 .get_strings = rtl8169_get_strings,
b9f2c044 1830 .get_sset_count = rtl8169_get_sset_count,
d4a3a0fc 1831 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1da177e4
LT
1832};
1833
07d3f51f 1834static void rtl8169_get_mac_version(struct rtl8169_private *tp,
5d320a20 1835 struct net_device *dev, u8 default_version)
1da177e4 1836{
5d320a20 1837 void __iomem *ioaddr = tp->mmio_addr;
0e485150
FR
1838 /*
1839 * The driver currently handles the 8168Bf and the 8168Be identically
1840 * but they can be identified more specifically through the test below
1841 * if needed:
1842 *
1843 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
0127215c
FR
1844 *
1845 * Same thing for the 8101Eb and the 8101Ec:
1846 *
1847 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
0e485150 1848 */
3744100e 1849 static const struct rtl_mac_info {
1da177e4 1850 u32 mask;
e3cf0cc0 1851 u32 val;
1da177e4
LT
1852 int mac_version;
1853 } mac_info[] = {
c2218925
HW
1854 /* 8168F family. */
1855 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
1856 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
1857
01dc7fec 1858 /* 8168E family. */
70090424 1859 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
01dc7fec 1860 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
1861 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
1862 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
1863
5b538df9 1864 /* 8168D family. */
daf9df6d 1865 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
1866 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
daf9df6d 1867 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
5b538df9 1868
e6de30d6 1869 /* 8168DP family. */
1870 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1871 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
4804b3b3 1872 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
e6de30d6 1873
ef808d50 1874 /* 8168C family. */
17c99297 1875 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
ef3386f0 1876 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
ef808d50 1877 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
7f3e3d3a 1878 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
e3cf0cc0
FR
1879 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
1880 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
197ff761 1881 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
6fb07058 1882 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
ef808d50 1883 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
e3cf0cc0
FR
1884
1885 /* 8168B family. */
1886 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
1887 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
1888 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
1889 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1890
1891 /* 8101 family. */
36a0e6c2 1892 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
5a5e4443
HW
1893 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1894 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1895 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2857ffb7
FR
1896 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1897 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1898 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
1899 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
1900 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
1901 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
e3cf0cc0 1902 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2857ffb7 1903 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
e3cf0cc0 1904 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2857ffb7
FR
1905 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
1906 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
e3cf0cc0
FR
1907 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1908 /* FIXME: where did these entries come from ? -- FR */
1909 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
1910 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
1911
1912 /* 8110 family. */
1913 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
1914 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
1915 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
1916 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
1917 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
1918 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
1919
f21b75e9
JD
1920 /* Catch-all */
1921 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
3744100e
FR
1922 };
1923 const struct rtl_mac_info *p = mac_info;
1da177e4
LT
1924 u32 reg;
1925
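	/*
	 * Entries are matched in order (narrower masks first within each
	 * family); the all-zero catch-all entry terminates the scan with
	 * RTL_GIGA_MAC_NONE for chips we do not recognize.
	 */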
e3cf0cc0
FR
1926 reg = RTL_R32(TxConfig);
1927 while ((reg & p->mask) != p->val)
1da177e4
LT
1928 p++;
1929 tp->mac_version = p->mac_version;
5d320a20
FR
1930
1931 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
1932 netif_notice(tp, probe, dev,
1933 "unknown MAC, using family default\n");
1934 tp->mac_version = default_version;
1935 }
1da177e4
LT
1936}
1937
1938static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1939{
bcf0bf90 1940 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1da177e4
LT
1941}
1942
867763c1
FR
1943struct phy_reg {
1944 u16 reg;
1945 u16 val;
1946};
1947
4da19633 1948static void rtl_writephy_batch(struct rtl8169_private *tp,
1949 const struct phy_reg *regs, int len)
867763c1
FR
1950{
1951 while (len-- > 0) {
4da19633 1952 rtl_writephy(tp, regs->reg, regs->val);
867763c1
FR
1953 regs++;
1954 }
1955}
1956
bca03d5f 1957#define PHY_READ 0x00000000
1958#define PHY_DATA_OR 0x10000000
1959#define PHY_DATA_AND 0x20000000
1960#define PHY_BJMPN 0x30000000
1961#define PHY_READ_EFUSE 0x40000000
1962#define PHY_READ_MAC_BYTE 0x50000000
1963#define PHY_WRITE_MAC_BYTE 0x60000000
1964#define PHY_CLEAR_READCOUNT 0x70000000
1965#define PHY_WRITE 0x80000000
1966#define PHY_READCOUNT_EQ_SKIP 0x90000000
1967#define PHY_COMP_EQ_SKIPN 0xa0000000
1968#define PHY_COMP_NEQ_SKIPN 0xb0000000
1969#define PHY_WRITE_PREVIOUS 0xc0000000
1970#define PHY_SKIPN 0xd0000000
1971#define PHY_DELAY_MS 0xe0000000
1972#define PHY_WRITE_ERI_WORD 0xf0000000
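
/*
 * Each firmware action word encodes the opcode in its top nibble, a
 * register/offset operand in bits 27:16 and immediate data in bits 15:0
 * (see rtl_phy_write_fw() below for how they are interpreted).
 */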
1973
960aee6c
HW
1974struct fw_info {
1975 u32 magic;
1976 char version[RTL_VER_SIZE];
1977 __le32 fw_start;
1978 __le32 fw_len;
1979 u8 chksum;
1980} __packed;
1981
1c361efb
FR
1982#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
1983
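/*
 * Two firmware container layouts are handled below: blobs whose leading
 * 32-bit word (fw_info.magic) is zero carry a header with version, start
 * offset and length plus a whole-file checksum that must sum to zero;
 * anything else is treated as a bare array of PHY action words.
 */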
1984static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
bca03d5f 1985{
b6ffd97f 1986 const struct firmware *fw = rtl_fw->fw;
960aee6c 1987 struct fw_info *fw_info = (struct fw_info *)fw->data;
1c361efb
FR
1988 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
1989 char *version = rtl_fw->version;
1990 bool rc = false;
1991
1992 if (fw->size < FW_OPCODE_SIZE)
1993 goto out;
960aee6c
HW
1994
1995 if (!fw_info->magic) {
1996 size_t i, size, start;
1997 u8 checksum = 0;
1998
1999 if (fw->size < sizeof(*fw_info))
2000 goto out;
2001
2002 for (i = 0; i < fw->size; i++)
2003 checksum += fw->data[i];
2004 if (checksum != 0)
2005 goto out;
2006
2007 start = le32_to_cpu(fw_info->fw_start);
2008 if (start > fw->size)
2009 goto out;
2010
2011 size = le32_to_cpu(fw_info->fw_len);
2012 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2013 goto out;
2014
2015 memcpy(version, fw_info->version, RTL_VER_SIZE);
2016
2017 pa->code = (__le32 *)(fw->data + start);
2018 pa->size = size;
2019 } else {
1c361efb
FR
2020 if (fw->size % FW_OPCODE_SIZE)
2021 goto out;
2022
2023 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2024
2025 pa->code = (__le32 *)fw->data;
2026 pa->size = fw->size / FW_OPCODE_SIZE;
2027 }
2028 version[RTL_VER_SIZE - 1] = 0;
2029
2030 rc = true;
2031out:
2032 return rc;
2033}
2034
fd112f2e
FR
2035static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2036 struct rtl_fw_phy_action *pa)
1c361efb 2037{
fd112f2e 2038 bool rc = false;
1c361efb 2039 size_t index;
bca03d5f 2040
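	/*
	 * Sanity-check the action stream: every relative jump or skip must
	 * stay inside the array, and opcodes this driver does not implement
	 * cause the firmware to be rejected.
	 */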
1c361efb
FR
2041 for (index = 0; index < pa->size; index++) {
2042 u32 action = le32_to_cpu(pa->code[index]);
42b82dc1 2043 u32 regno = (action & 0x0fff0000) >> 16;
bca03d5f 2044
42b82dc1 2045 switch (action & 0xf0000000) {
2046 case PHY_READ:
2047 case PHY_DATA_OR:
2048 case PHY_DATA_AND:
2049 case PHY_READ_EFUSE:
2050 case PHY_CLEAR_READCOUNT:
2051 case PHY_WRITE:
2052 case PHY_WRITE_PREVIOUS:
2053 case PHY_DELAY_MS:
2054 break;
2055
2056 case PHY_BJMPN:
2057 if (regno > index) {
fd112f2e 2058 netif_err(tp, ifup, tp->dev,
cecb5fd7 2059 "Out of range of firmware\n");
fd112f2e 2060 goto out;
42b82dc1 2061 }
2062 break;
2063 case PHY_READCOUNT_EQ_SKIP:
1c361efb 2064 if (index + 2 >= pa->size) {
fd112f2e 2065 netif_err(tp, ifup, tp->dev,
cecb5fd7 2066 "Out of range of firmware\n");
fd112f2e 2067 goto out;
42b82dc1 2068 }
2069 break;
2070 case PHY_COMP_EQ_SKIPN:
2071 case PHY_COMP_NEQ_SKIPN:
2072 case PHY_SKIPN:
1c361efb 2073 if (index + 1 + regno >= pa->size) {
fd112f2e 2074 netif_err(tp, ifup, tp->dev,
cecb5fd7 2075 "Out of range of firmware\n");
fd112f2e 2076 goto out;
42b82dc1 2077 }
bca03d5f 2078 break;
2079
42b82dc1 2080 case PHY_READ_MAC_BYTE:
2081 case PHY_WRITE_MAC_BYTE:
2082 case PHY_WRITE_ERI_WORD:
2083 default:
fd112f2e 2084 netif_err(tp, ifup, tp->dev,
42b82dc1 2085 "Invalid action 0x%08x\n", action);
fd112f2e 2086 goto out;
bca03d5f 2087 }
2088 }
fd112f2e
FR
2089 rc = true;
2090out:
2091 return rc;
2092}
bca03d5f 2093
fd112f2e
FR
2094static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2095{
2096 struct net_device *dev = tp->dev;
2097 int rc = -EINVAL;
2098
2099 if (!rtl_fw_format_ok(tp, rtl_fw)) {
 2100 netif_err(tp, ifup, dev, "invalid firmware\n");
2101 goto out;
2102 }
2103
2104 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2105 rc = 0;
2106out:
2107 return rc;
2108}
2109
2110static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2111{
2112 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2113 u32 predata, count;
2114 size_t index;
2115
2116 predata = count = 0;
42b82dc1 2117
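	/*
	 * 'predata' holds the last value read (PHY_READ/PHY_READ_EFUSE) and
	 * feeds the compare/skip and PHY_WRITE_PREVIOUS opcodes; 'count'
	 * tracks reads since the last PHY_CLEAR_READCOUNT.
	 */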
1c361efb
FR
2118 for (index = 0; index < pa->size; ) {
2119 u32 action = le32_to_cpu(pa->code[index]);
bca03d5f 2120 u32 data = action & 0x0000ffff;
42b82dc1 2121 u32 regno = (action & 0x0fff0000) >> 16;
2122
2123 if (!action)
2124 break;
bca03d5f 2125
 2126 switch (action & 0xf0000000) {
42b82dc1 2127 case PHY_READ:
2128 predata = rtl_readphy(tp, regno);
2129 count++;
2130 index++;
2131 break;
2132 case PHY_DATA_OR:
2133 predata |= data;
2134 index++;
2135 break;
2136 case PHY_DATA_AND:
2137 predata &= data;
2138 index++;
2139 break;
2140 case PHY_BJMPN:
2141 index -= regno;
2142 break;
2143 case PHY_READ_EFUSE:
2144 predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
2145 index++;
2146 break;
2147 case PHY_CLEAR_READCOUNT:
2148 count = 0;
2149 index++;
2150 break;
bca03d5f 2151 case PHY_WRITE:
42b82dc1 2152 rtl_writephy(tp, regno, data);
2153 index++;
2154 break;
2155 case PHY_READCOUNT_EQ_SKIP:
cecb5fd7 2156 index += (count == data) ? 2 : 1;
bca03d5f 2157 break;
42b82dc1 2158 case PHY_COMP_EQ_SKIPN:
2159 if (predata == data)
2160 index += regno;
2161 index++;
2162 break;
2163 case PHY_COMP_NEQ_SKIPN:
2164 if (predata != data)
2165 index += regno;
2166 index++;
2167 break;
2168 case PHY_WRITE_PREVIOUS:
2169 rtl_writephy(tp, regno, predata);
2170 index++;
2171 break;
2172 case PHY_SKIPN:
2173 index += regno + 1;
2174 break;
2175 case PHY_DELAY_MS:
2176 mdelay(data);
2177 index++;
2178 break;
2179
2180 case PHY_READ_MAC_BYTE:
2181 case PHY_WRITE_MAC_BYTE:
2182 case PHY_WRITE_ERI_WORD:
bca03d5f 2183 default:
2184 BUG();
2185 }
2186 }
2187}
2188
f1e02ed1 2189static void rtl_release_firmware(struct rtl8169_private *tp)
2190{
b6ffd97f
FR
2191 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2192 release_firmware(tp->rtl_fw->fw);
2193 kfree(tp->rtl_fw);
2194 }
2195 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
f1e02ed1 2196}
2197
953a12cc 2198static void rtl_apply_firmware(struct rtl8169_private *tp)
f1e02ed1 2199{
b6ffd97f 2200 struct rtl_fw *rtl_fw = tp->rtl_fw;
f1e02ed1 2201
2202 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
b6ffd97f
FR
2203 if (!IS_ERR_OR_NULL(rtl_fw))
2204 rtl_phy_write_fw(tp, rtl_fw);
953a12cc
FR
2205}
2206
2207static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2208{
2209 if (rtl_readphy(tp, reg) != val)
2210 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2211 else
2212 rtl_apply_firmware(tp);
f1e02ed1 2213}
2214
4da19633 2215static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
1da177e4 2216{
350f7596 2217 static const struct phy_reg phy_reg_init[] = {
0b9b571d 2218 { 0x1f, 0x0001 },
2219 { 0x06, 0x006e },
2220 { 0x08, 0x0708 },
2221 { 0x15, 0x4000 },
2222 { 0x18, 0x65c7 },
1da177e4 2223
0b9b571d 2224 { 0x1f, 0x0001 },
2225 { 0x03, 0x00a1 },
2226 { 0x02, 0x0008 },
2227 { 0x01, 0x0120 },
2228 { 0x00, 0x1000 },
2229 { 0x04, 0x0800 },
2230 { 0x04, 0x0000 },
1da177e4 2231
0b9b571d 2232 { 0x03, 0xff41 },
2233 { 0x02, 0xdf60 },
2234 { 0x01, 0x0140 },
2235 { 0x00, 0x0077 },
2236 { 0x04, 0x7800 },
2237 { 0x04, 0x7000 },
2238
2239 { 0x03, 0x802f },
2240 { 0x02, 0x4f02 },
2241 { 0x01, 0x0409 },
2242 { 0x00, 0xf0f9 },
2243 { 0x04, 0x9800 },
2244 { 0x04, 0x9000 },
2245
2246 { 0x03, 0xdf01 },
2247 { 0x02, 0xdf20 },
2248 { 0x01, 0xff95 },
2249 { 0x00, 0xba00 },
2250 { 0x04, 0xa800 },
2251 { 0x04, 0xa000 },
2252
2253 { 0x03, 0xff41 },
2254 { 0x02, 0xdf20 },
2255 { 0x01, 0x0140 },
2256 { 0x00, 0x00bb },
2257 { 0x04, 0xb800 },
2258 { 0x04, 0xb000 },
2259
2260 { 0x03, 0xdf41 },
2261 { 0x02, 0xdc60 },
2262 { 0x01, 0x6340 },
2263 { 0x00, 0x007d },
2264 { 0x04, 0xd800 },
2265 { 0x04, 0xd000 },
2266
2267 { 0x03, 0xdf01 },
2268 { 0x02, 0xdf20 },
2269 { 0x01, 0x100a },
2270 { 0x00, 0xa0ff },
2271 { 0x04, 0xf800 },
2272 { 0x04, 0xf000 },
2273
2274 { 0x1f, 0x0000 },
2275 { 0x0b, 0x0000 },
2276 { 0x00, 0x9200 }
2277 };
1da177e4 2278
4da19633 2279 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1da177e4
LT
2280}
2281
4da19633 2282static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
5615d9f1 2283{
350f7596 2284 static const struct phy_reg phy_reg_init[] = {
a441d7b6
FR
2285 { 0x1f, 0x0002 },
2286 { 0x01, 0x90d0 },
2287 { 0x1f, 0x0000 }
2288 };
2289
4da19633 2290 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
5615d9f1
FR
2291}
2292
4da19633 2293static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2e955856 2294{
2295 struct pci_dev *pdev = tp->pci_dev;
2e955856 2296
ccbae55e
SS
2297 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2298 (pdev->subsystem_device != 0xe000))
2e955856 2299 return;
2300
4da19633 2301 rtl_writephy(tp, 0x1f, 0x0001);
2302 rtl_writephy(tp, 0x10, 0xf01b);
2303 rtl_writephy(tp, 0x1f, 0x0000);
2e955856 2304}
2305
4da19633 2306static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2e955856 2307{
350f7596 2308 static const struct phy_reg phy_reg_init[] = {
2e955856 2309 { 0x1f, 0x0001 },
2310 { 0x04, 0x0000 },
2311 { 0x03, 0x00a1 },
2312 { 0x02, 0x0008 },
2313 { 0x01, 0x0120 },
2314 { 0x00, 0x1000 },
2315 { 0x04, 0x0800 },
2316 { 0x04, 0x9000 },
2317 { 0x03, 0x802f },
2318 { 0x02, 0x4f02 },
2319 { 0x01, 0x0409 },
2320 { 0x00, 0xf099 },
2321 { 0x04, 0x9800 },
2322 { 0x04, 0xa000 },
2323 { 0x03, 0xdf01 },
2324 { 0x02, 0xdf20 },
2325 { 0x01, 0xff95 },
2326 { 0x00, 0xba00 },
2327 { 0x04, 0xa800 },
2328 { 0x04, 0xf000 },
2329 { 0x03, 0xdf01 },
2330 { 0x02, 0xdf20 },
2331 { 0x01, 0x101a },
2332 { 0x00, 0xa0ff },
2333 { 0x04, 0xf800 },
2334 { 0x04, 0x0000 },
2335 { 0x1f, 0x0000 },
2336
2337 { 0x1f, 0x0001 },
2338 { 0x10, 0xf41b },
2339 { 0x14, 0xfb54 },
2340 { 0x18, 0xf5c7 },
2341 { 0x1f, 0x0000 },
2342
2343 { 0x1f, 0x0001 },
2344 { 0x17, 0x0cc0 },
2345 { 0x1f, 0x0000 }
2346 };
2347
4da19633 2348 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2e955856 2349
4da19633 2350 rtl8169scd_hw_phy_config_quirk(tp);
2e955856 2351}
2352
4da19633 2353static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
8c7006aa 2354{
350f7596 2355 static const struct phy_reg phy_reg_init[] = {
8c7006aa 2356 { 0x1f, 0x0001 },
2357 { 0x04, 0x0000 },
2358 { 0x03, 0x00a1 },
2359 { 0x02, 0x0008 },
2360 { 0x01, 0x0120 },
2361 { 0x00, 0x1000 },
2362 { 0x04, 0x0800 },
2363 { 0x04, 0x9000 },
2364 { 0x03, 0x802f },
2365 { 0x02, 0x4f02 },
2366 { 0x01, 0x0409 },
2367 { 0x00, 0xf099 },
2368 { 0x04, 0x9800 },
2369 { 0x04, 0xa000 },
2370 { 0x03, 0xdf01 },
2371 { 0x02, 0xdf20 },
2372 { 0x01, 0xff95 },
2373 { 0x00, 0xba00 },
2374 { 0x04, 0xa800 },
2375 { 0x04, 0xf000 },
2376 { 0x03, 0xdf01 },
2377 { 0x02, 0xdf20 },
2378 { 0x01, 0x101a },
2379 { 0x00, 0xa0ff },
2380 { 0x04, 0xf800 },
2381 { 0x04, 0x0000 },
2382 { 0x1f, 0x0000 },
2383
2384 { 0x1f, 0x0001 },
2385 { 0x0b, 0x8480 },
2386 { 0x1f, 0x0000 },
2387
2388 { 0x1f, 0x0001 },
2389 { 0x18, 0x67c7 },
2390 { 0x04, 0x2000 },
2391 { 0x03, 0x002f },
2392 { 0x02, 0x4360 },
2393 { 0x01, 0x0109 },
2394 { 0x00, 0x3022 },
2395 { 0x04, 0x2800 },
2396 { 0x1f, 0x0000 },
2397
2398 { 0x1f, 0x0001 },
2399 { 0x17, 0x0cc0 },
2400 { 0x1f, 0x0000 }
2401 };
2402
4da19633 2403 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
8c7006aa 2404}
2405
4da19633 2406static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
236b8082 2407{
350f7596 2408 static const struct phy_reg phy_reg_init[] = {
236b8082
FR
2409 { 0x10, 0xf41b },
2410 { 0x1f, 0x0000 }
2411 };
2412
4da19633 2413 rtl_writephy(tp, 0x1f, 0x0001);
2414 rtl_patchphy(tp, 0x16, 1 << 0);
236b8082 2415
4da19633 2416 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
236b8082
FR
2417}
2418
4da19633 2419static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
236b8082 2420{
350f7596 2421 static const struct phy_reg phy_reg_init[] = {
236b8082
FR
2422 { 0x1f, 0x0001 },
2423 { 0x10, 0xf41b },
2424 { 0x1f, 0x0000 }
2425 };
2426
4da19633 2427 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
236b8082
FR
2428}
2429
4da19633 2430static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
867763c1 2431{
350f7596 2432 static const struct phy_reg phy_reg_init[] = {
867763c1
FR
2433 { 0x1f, 0x0000 },
2434 { 0x1d, 0x0f00 },
2435 { 0x1f, 0x0002 },
2436 { 0x0c, 0x1ec8 },
2437 { 0x1f, 0x0000 }
2438 };
2439
4da19633 2440 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
867763c1
FR
2441}
2442
4da19633 2443static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
ef3386f0 2444{
350f7596 2445 static const struct phy_reg phy_reg_init[] = {
ef3386f0
FR
2446 { 0x1f, 0x0001 },
2447 { 0x1d, 0x3d98 },
2448 { 0x1f, 0x0000 }
2449 };
2450
4da19633 2451 rtl_writephy(tp, 0x1f, 0x0000);
2452 rtl_patchphy(tp, 0x14, 1 << 5);
2453 rtl_patchphy(tp, 0x0d, 1 << 5);
ef3386f0 2454
4da19633 2455 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
ef3386f0
FR
2456}
2457
4da19633 2458static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
867763c1 2459{
350f7596 2460 static const struct phy_reg phy_reg_init[] = {
a3f80671
FR
2461 { 0x1f, 0x0001 },
2462 { 0x12, 0x2300 },
867763c1
FR
2463 { 0x1f, 0x0002 },
2464 { 0x00, 0x88d4 },
2465 { 0x01, 0x82b1 },
2466 { 0x03, 0x7002 },
2467 { 0x08, 0x9e30 },
2468 { 0x09, 0x01f0 },
2469 { 0x0a, 0x5500 },
2470 { 0x0c, 0x00c8 },
2471 { 0x1f, 0x0003 },
2472 { 0x12, 0xc096 },
2473 { 0x16, 0x000a },
f50d4275
FR
2474 { 0x1f, 0x0000 },
2475 { 0x1f, 0x0000 },
2476 { 0x09, 0x2000 },
2477 { 0x09, 0x0000 }
867763c1
FR
2478 };
2479
4da19633 2480 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
f50d4275 2481
4da19633 2482 rtl_patchphy(tp, 0x14, 1 << 5);
2483 rtl_patchphy(tp, 0x0d, 1 << 5);
2484 rtl_writephy(tp, 0x1f, 0x0000);
867763c1
FR
2485}
2486
4da19633 2487static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
7da97ec9 2488{
350f7596 2489 static const struct phy_reg phy_reg_init[] = {
f50d4275 2490 { 0x1f, 0x0001 },
7da97ec9 2491 { 0x12, 0x2300 },
f50d4275
FR
2492 { 0x03, 0x802f },
2493 { 0x02, 0x4f02 },
2494 { 0x01, 0x0409 },
2495 { 0x00, 0xf099 },
2496 { 0x04, 0x9800 },
2497 { 0x04, 0x9000 },
2498 { 0x1d, 0x3d98 },
7da97ec9
FR
2499 { 0x1f, 0x0002 },
2500 { 0x0c, 0x7eb8 },
f50d4275
FR
2501 { 0x06, 0x0761 },
2502 { 0x1f, 0x0003 },
2503 { 0x16, 0x0f0a },
7da97ec9
FR
2504 { 0x1f, 0x0000 }
2505 };
2506
4da19633 2507 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
f50d4275 2508
4da19633 2509 rtl_patchphy(tp, 0x16, 1 << 0);
2510 rtl_patchphy(tp, 0x14, 1 << 5);
2511 rtl_patchphy(tp, 0x0d, 1 << 5);
2512 rtl_writephy(tp, 0x1f, 0x0000);
7da97ec9
FR
2513}
2514
4da19633 2515static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
197ff761 2516{
350f7596 2517 static const struct phy_reg phy_reg_init[] = {
197ff761
FR
2518 { 0x1f, 0x0001 },
2519 { 0x12, 0x2300 },
2520 { 0x1d, 0x3d98 },
2521 { 0x1f, 0x0002 },
2522 { 0x0c, 0x7eb8 },
2523 { 0x06, 0x5461 },
2524 { 0x1f, 0x0003 },
2525 { 0x16, 0x0f0a },
2526 { 0x1f, 0x0000 }
2527 };
2528
4da19633 2529 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
197ff761 2530
4da19633 2531 rtl_patchphy(tp, 0x16, 1 << 0);
2532 rtl_patchphy(tp, 0x14, 1 << 5);
2533 rtl_patchphy(tp, 0x0d, 1 << 5);
2534 rtl_writephy(tp, 0x1f, 0x0000);
197ff761
FR
2535}
2536
4da19633 2537static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
6fb07058 2538{
4da19633 2539 rtl8168c_3_hw_phy_config(tp);
6fb07058
FR
2540}
2541
bca03d5f 2542static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
5b538df9 2543{
350f7596 2544 static const struct phy_reg phy_reg_init_0[] = {
bca03d5f 2545 /* Channel Estimation */
5b538df9 2546 { 0x1f, 0x0001 },
daf9df6d 2547 { 0x06, 0x4064 },
2548 { 0x07, 0x2863 },
2549 { 0x08, 0x059c },
2550 { 0x09, 0x26b4 },
2551 { 0x0a, 0x6a19 },
2552 { 0x0b, 0xdcc8 },
2553 { 0x10, 0xf06d },
2554 { 0x14, 0x7f68 },
2555 { 0x18, 0x7fd9 },
2556 { 0x1c, 0xf0ff },
2557 { 0x1d, 0x3d9c },
5b538df9 2558 { 0x1f, 0x0003 },
daf9df6d 2559 { 0x12, 0xf49f },
2560 { 0x13, 0x070b },
2561 { 0x1a, 0x05ad },
bca03d5f 2562 { 0x14, 0x94c0 },
2563
2564 /*
2565 * Tx Error Issue
cecb5fd7 2566 * Enhance line driver power
bca03d5f 2567 */
5b538df9 2568 { 0x1f, 0x0002 },
daf9df6d 2569 { 0x06, 0x5561 },
2570 { 0x1f, 0x0005 },
2571 { 0x05, 0x8332 },
bca03d5f 2572 { 0x06, 0x5561 },
2573
2574 /*
 2575 * Cannot link at 1Gbps with a bad cable
 2576 * Decrease SNR threshold from 21.07dB to 19.04dB
2577 */
2578 { 0x1f, 0x0001 },
2579 { 0x17, 0x0cc0 },
daf9df6d 2580
5b538df9 2581 { 0x1f, 0x0000 },
bca03d5f 2582 { 0x0d, 0xf880 }
daf9df6d 2583 };
bca03d5f 2584 void __iomem *ioaddr = tp->mmio_addr;
daf9df6d 2585
4da19633 2586 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
daf9df6d 2587
bca03d5f 2588 /*
2589 * Rx Error Issue
2590 * Fine Tune Switching regulator parameter
2591 */
4da19633 2592 rtl_writephy(tp, 0x1f, 0x0002);
2593 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2594 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
daf9df6d 2595
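	/*
	 * eFuse byte 0x01 apparently distinguishes two builds of the 8168D
	 * PHY: the 0xb1 build additionally needs register 0x0d walked up to
	 * 0x006c, the other build only gets the 0x6662 tweak below.
	 */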
daf9df6d 2596 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
350f7596 2597 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2598 { 0x1f, 0x0002 },
2599 { 0x05, 0x669a },
2600 { 0x1f, 0x0005 },
2601 { 0x05, 0x8330 },
2602 { 0x06, 0x669a },
2603 { 0x1f, 0x0002 }
2604 };
2605 int val;
2606
4da19633 2607 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
daf9df6d 2608
4da19633 2609 val = rtl_readphy(tp, 0x0d);
daf9df6d 2610
2611 if ((val & 0x00ff) != 0x006c) {
350f7596 2612 static const u32 set[] = {
daf9df6d 2613 0x0065, 0x0066, 0x0067, 0x0068,
2614 0x0069, 0x006a, 0x006b, 0x006c
2615 };
2616 int i;
2617
4da19633 2618 rtl_writephy(tp, 0x1f, 0x0002);
daf9df6d 2619
2620 val &= 0xff00;
2621 for (i = 0; i < ARRAY_SIZE(set); i++)
4da19633 2622 rtl_writephy(tp, 0x0d, val | set[i]);
daf9df6d 2623 }
2624 } else {
350f7596 2625 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2626 { 0x1f, 0x0002 },
2627 { 0x05, 0x6662 },
2628 { 0x1f, 0x0005 },
2629 { 0x05, 0x8330 },
2630 { 0x06, 0x6662 }
2631 };
2632
4da19633 2633 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
daf9df6d 2634 }
2635
bca03d5f 2636 /* RSET couple improve */
4da19633 2637 rtl_writephy(tp, 0x1f, 0x0002);
2638 rtl_patchphy(tp, 0x0d, 0x0300);
2639 rtl_patchphy(tp, 0x0f, 0x0010);
daf9df6d 2640
bca03d5f 2641 /* Fine tune PLL performance */
4da19633 2642 rtl_writephy(tp, 0x1f, 0x0002);
2643 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2644 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
daf9df6d 2645
4da19633 2646 rtl_writephy(tp, 0x1f, 0x0005);
2647 rtl_writephy(tp, 0x05, 0x001b);
953a12cc
FR
2648
2649 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
bca03d5f 2650
4da19633 2651 rtl_writephy(tp, 0x1f, 0x0000);
daf9df6d 2652}
2653
bca03d5f 2654static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
daf9df6d 2655{
350f7596 2656 static const struct phy_reg phy_reg_init_0[] = {
bca03d5f 2657 /* Channel Estimation */
daf9df6d 2658 { 0x1f, 0x0001 },
2659 { 0x06, 0x4064 },
2660 { 0x07, 0x2863 },
2661 { 0x08, 0x059c },
2662 { 0x09, 0x26b4 },
2663 { 0x0a, 0x6a19 },
2664 { 0x0b, 0xdcc8 },
2665 { 0x10, 0xf06d },
2666 { 0x14, 0x7f68 },
2667 { 0x18, 0x7fd9 },
2668 { 0x1c, 0xf0ff },
2669 { 0x1d, 0x3d9c },
2670 { 0x1f, 0x0003 },
2671 { 0x12, 0xf49f },
2672 { 0x13, 0x070b },
2673 { 0x1a, 0x05ad },
2674 { 0x14, 0x94c0 },
2675
bca03d5f 2676 /*
2677 * Tx Error Issue
cecb5fd7 2678 * Enhance line driver power
bca03d5f 2679 */
daf9df6d 2680 { 0x1f, 0x0002 },
2681 { 0x06, 0x5561 },
2682 { 0x1f, 0x0005 },
2683 { 0x05, 0x8332 },
bca03d5f 2684 { 0x06, 0x5561 },
2685
2686 /*
 2687 * Cannot link at 1Gbps with a bad cable
 2688 * Decrease SNR threshold from 21.07dB to 19.04dB
2689 */
2690 { 0x1f, 0x0001 },
2691 { 0x17, 0x0cc0 },
daf9df6d 2692
2693 { 0x1f, 0x0000 },
bca03d5f 2694 { 0x0d, 0xf880 }
5b538df9 2695 };
bca03d5f 2696 void __iomem *ioaddr = tp->mmio_addr;
5b538df9 2697
4da19633 2698 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
5b538df9 2699
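	/* Same eFuse-keyed PHY revision split as rtl8168d_1_hw_phy_config(). */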
daf9df6d 2700 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
350f7596 2701 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2702 { 0x1f, 0x0002 },
2703 { 0x05, 0x669a },
5b538df9 2704 { 0x1f, 0x0005 },
daf9df6d 2705 { 0x05, 0x8330 },
2706 { 0x06, 0x669a },
2707
2708 { 0x1f, 0x0002 }
2709 };
2710 int val;
2711
4da19633 2712 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
daf9df6d 2713
4da19633 2714 val = rtl_readphy(tp, 0x0d);
daf9df6d 2715 if ((val & 0x00ff) != 0x006c) {
b6bc7650 2716 static const u32 set[] = {
daf9df6d 2717 0x0065, 0x0066, 0x0067, 0x0068,
2718 0x0069, 0x006a, 0x006b, 0x006c
2719 };
2720 int i;
2721
4da19633 2722 rtl_writephy(tp, 0x1f, 0x0002);
daf9df6d 2723
2724 val &= 0xff00;
2725 for (i = 0; i < ARRAY_SIZE(set); i++)
4da19633 2726 rtl_writephy(tp, 0x0d, val | set[i]);
daf9df6d 2727 }
2728 } else {
350f7596 2729 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2730 { 0x1f, 0x0002 },
2731 { 0x05, 0x2642 },
5b538df9 2732 { 0x1f, 0x0005 },
daf9df6d 2733 { 0x05, 0x8330 },
2734 { 0x06, 0x2642 }
5b538df9
FR
2735 };
2736
4da19633 2737 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
5b538df9
FR
2738 }
2739
bca03d5f 2740 /* Fine tune PLL performance */
4da19633 2741 rtl_writephy(tp, 0x1f, 0x0002);
2742 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2743 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
daf9df6d 2744
bca03d5f 2745 /* Switching regulator Slew rate */
4da19633 2746 rtl_writephy(tp, 0x1f, 0x0002);
2747 rtl_patchphy(tp, 0x0f, 0x0017);
daf9df6d 2748
4da19633 2749 rtl_writephy(tp, 0x1f, 0x0005);
2750 rtl_writephy(tp, 0x05, 0x001b);
953a12cc
FR
2751
2752 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
bca03d5f 2753
4da19633 2754 rtl_writephy(tp, 0x1f, 0x0000);
daf9df6d 2755}
2756
4da19633 2757static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
daf9df6d 2758{
350f7596 2759 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2760 { 0x1f, 0x0002 },
2761 { 0x10, 0x0008 },
2762 { 0x0d, 0x006c },
2763
2764 { 0x1f, 0x0000 },
2765 { 0x0d, 0xf880 },
2766
2767 { 0x1f, 0x0001 },
2768 { 0x17, 0x0cc0 },
2769
2770 { 0x1f, 0x0001 },
2771 { 0x0b, 0xa4d8 },
2772 { 0x09, 0x281c },
2773 { 0x07, 0x2883 },
2774 { 0x0a, 0x6b35 },
2775 { 0x1d, 0x3da4 },
2776 { 0x1c, 0xeffd },
2777 { 0x14, 0x7f52 },
2778 { 0x18, 0x7fc6 },
2779 { 0x08, 0x0601 },
2780 { 0x06, 0x4063 },
2781 { 0x10, 0xf074 },
2782 { 0x1f, 0x0003 },
2783 { 0x13, 0x0789 },
2784 { 0x12, 0xf4bd },
2785 { 0x1a, 0x04fd },
2786 { 0x14, 0x84b0 },
2787 { 0x1f, 0x0000 },
2788 { 0x00, 0x9200 },
2789
2790 { 0x1f, 0x0005 },
2791 { 0x01, 0x0340 },
2792 { 0x1f, 0x0001 },
2793 { 0x04, 0x4000 },
2794 { 0x03, 0x1d21 },
2795 { 0x02, 0x0c32 },
2796 { 0x01, 0x0200 },
2797 { 0x00, 0x5554 },
2798 { 0x04, 0x4800 },
2799 { 0x04, 0x4000 },
2800 { 0x04, 0xf000 },
2801 { 0x03, 0xdf01 },
2802 { 0x02, 0xdf20 },
2803 { 0x01, 0x101a },
2804 { 0x00, 0xa0ff },
2805 { 0x04, 0xf800 },
2806 { 0x04, 0xf000 },
2807 { 0x1f, 0x0000 },
2808
2809 { 0x1f, 0x0007 },
2810 { 0x1e, 0x0023 },
2811 { 0x16, 0x0000 },
2812 { 0x1f, 0x0000 }
2813 };
2814
4da19633 2815 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
5b538df9
FR
2816}
2817
e6de30d6 2818static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2819{
2820 static const struct phy_reg phy_reg_init[] = {
2821 { 0x1f, 0x0001 },
2822 { 0x17, 0x0cc0 },
2823
2824 { 0x1f, 0x0007 },
2825 { 0x1e, 0x002d },
2826 { 0x18, 0x0040 },
2827 { 0x1f, 0x0000 }
2828 };
2829
2830 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2831 rtl_patchphy(tp, 0x0d, 1 << 5);
2832}
2833
70090424 2834static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
01dc7fec 2835{
2836 static const struct phy_reg phy_reg_init[] = {
2837 /* Enable Delay cap */
2838 { 0x1f, 0x0005 },
2839 { 0x05, 0x8b80 },
2840 { 0x06, 0xc896 },
2841 { 0x1f, 0x0000 },
2842
2843 /* Channel estimation fine tune */
2844 { 0x1f, 0x0001 },
2845 { 0x0b, 0x6c20 },
2846 { 0x07, 0x2872 },
2847 { 0x1c, 0xefff },
2848 { 0x1f, 0x0003 },
2849 { 0x14, 0x6420 },
2850 { 0x1f, 0x0000 },
2851
2852 /* Update PFM & 10M TX idle timer */
2853 { 0x1f, 0x0007 },
2854 { 0x1e, 0x002f },
2855 { 0x15, 0x1919 },
2856 { 0x1f, 0x0000 },
2857
2858 { 0x1f, 0x0007 },
2859 { 0x1e, 0x00ac },
2860 { 0x18, 0x0006 },
2861 { 0x1f, 0x0000 }
2862 };
2863
15ecd039
FR
2864 rtl_apply_firmware(tp);
2865
01dc7fec 2866 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2867
2868 /* DCO enable for 10M IDLE Power */
2869 rtl_writephy(tp, 0x1f, 0x0007);
2870 rtl_writephy(tp, 0x1e, 0x0023);
2871 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2872 rtl_writephy(tp, 0x1f, 0x0000);
2873
2874 /* For impedance matching */
2875 rtl_writephy(tp, 0x1f, 0x0002);
2876 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
cecb5fd7 2877 rtl_writephy(tp, 0x1f, 0x0000);
01dc7fec 2878
2879 /* PHY auto speed down */
2880 rtl_writephy(tp, 0x1f, 0x0007);
2881 rtl_writephy(tp, 0x1e, 0x002d);
2882 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
2883 rtl_writephy(tp, 0x1f, 0x0000);
2884 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2885
2886 rtl_writephy(tp, 0x1f, 0x0005);
2887 rtl_writephy(tp, 0x05, 0x8b86);
2888 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2889 rtl_writephy(tp, 0x1f, 0x0000);
2890
2891 rtl_writephy(tp, 0x1f, 0x0005);
2892 rtl_writephy(tp, 0x05, 0x8b85);
2893 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
2894 rtl_writephy(tp, 0x1f, 0x0007);
2895 rtl_writephy(tp, 0x1e, 0x0020);
2896 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
2897 rtl_writephy(tp, 0x1f, 0x0006);
2898 rtl_writephy(tp, 0x00, 0x5a00);
2899 rtl_writephy(tp, 0x1f, 0x0000);
2900 rtl_writephy(tp, 0x0d, 0x0007);
2901 rtl_writephy(tp, 0x0e, 0x003c);
2902 rtl_writephy(tp, 0x0d, 0x4007);
2903 rtl_writephy(tp, 0x0e, 0x0000);
2904 rtl_writephy(tp, 0x0d, 0x0000);
2905}
2906
70090424
HW
2907static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
2908{
2909 static const struct phy_reg phy_reg_init[] = {
2910 /* Enable Delay cap */
2911 { 0x1f, 0x0004 },
2912 { 0x1f, 0x0007 },
2913 { 0x1e, 0x00ac },
2914 { 0x18, 0x0006 },
2915 { 0x1f, 0x0002 },
2916 { 0x1f, 0x0000 },
2917 { 0x1f, 0x0000 },
2918
2919 /* Channel estimation fine tune */
2920 { 0x1f, 0x0003 },
2921 { 0x09, 0xa20f },
2922 { 0x1f, 0x0000 },
2923 { 0x1f, 0x0000 },
2924
2925 /* Green Setting */
2926 { 0x1f, 0x0005 },
2927 { 0x05, 0x8b5b },
2928 { 0x06, 0x9222 },
2929 { 0x05, 0x8b6d },
2930 { 0x06, 0x8000 },
2931 { 0x05, 0x8b76 },
2932 { 0x06, 0x8000 },
2933 { 0x1f, 0x0000 }
2934 };
2935
2936 rtl_apply_firmware(tp);
2937
2938 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2939
 2940 /* For 4-corner performance improvement */
2941 rtl_writephy(tp, 0x1f, 0x0005);
2942 rtl_writephy(tp, 0x05, 0x8b80);
2943 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2944 rtl_writephy(tp, 0x1f, 0x0000);
2945
2946 /* PHY auto speed down */
2947 rtl_writephy(tp, 0x1f, 0x0004);
2948 rtl_writephy(tp, 0x1f, 0x0007);
2949 rtl_writephy(tp, 0x1e, 0x002d);
2950 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
2951 rtl_writephy(tp, 0x1f, 0x0002);
2952 rtl_writephy(tp, 0x1f, 0x0000);
2953 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2954
 2955 /* Improve 10M EEE waveform */
2956 rtl_writephy(tp, 0x1f, 0x0005);
2957 rtl_writephy(tp, 0x05, 0x8b86);
2958 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2959 rtl_writephy(tp, 0x1f, 0x0000);
2960
2961 /* Improve 2-pair detection performance */
2962 rtl_writephy(tp, 0x1f, 0x0005);
2963 rtl_writephy(tp, 0x05, 0x8b85);
2964 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
2965 rtl_writephy(tp, 0x1f, 0x0000);
2966
2967 /* EEE setting */
2968 rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
2969 ERIAR_EXGMAC);
2970 rtl_writephy(tp, 0x1f, 0x0005);
2971 rtl_writephy(tp, 0x05, 0x8b85);
2972 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
2973 rtl_writephy(tp, 0x1f, 0x0004);
2974 rtl_writephy(tp, 0x1f, 0x0007);
2975 rtl_writephy(tp, 0x1e, 0x0020);
1b23a3e3 2976 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
70090424
HW
2977 rtl_writephy(tp, 0x1f, 0x0002);
2978 rtl_writephy(tp, 0x1f, 0x0000);
2979 rtl_writephy(tp, 0x0d, 0x0007);
2980 rtl_writephy(tp, 0x0e, 0x003c);
2981 rtl_writephy(tp, 0x0d, 0x4007);
2982 rtl_writephy(tp, 0x0e, 0x0000);
2983 rtl_writephy(tp, 0x0d, 0x0000);
2984
2985 /* Green feature */
2986 rtl_writephy(tp, 0x1f, 0x0003);
2987 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
2988 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
2989 rtl_writephy(tp, 0x1f, 0x0000);
2990}
2991
c2218925
HW
2992static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
2993{
2994 static const struct phy_reg phy_reg_init[] = {
2995 /* Channel estimation fine tune */
2996 { 0x1f, 0x0003 },
2997 { 0x09, 0xa20f },
2998 { 0x1f, 0x0000 },
2999
3000 /* Modify green table for giga & fnet */
3001 { 0x1f, 0x0005 },
3002 { 0x05, 0x8b55 },
3003 { 0x06, 0x0000 },
3004 { 0x05, 0x8b5e },
3005 { 0x06, 0x0000 },
3006 { 0x05, 0x8b67 },
3007 { 0x06, 0x0000 },
3008 { 0x05, 0x8b70 },
3009 { 0x06, 0x0000 },
3010 { 0x1f, 0x0000 },
3011 { 0x1f, 0x0007 },
3012 { 0x1e, 0x0078 },
3013 { 0x17, 0x0000 },
3014 { 0x19, 0x00fb },
3015 { 0x1f, 0x0000 },
3016
3017 /* Modify green table for 10M */
3018 { 0x1f, 0x0005 },
3019 { 0x05, 0x8b79 },
3020 { 0x06, 0xaa00 },
3021 { 0x1f, 0x0000 },
3022
 3023 /* Disable high-impedance detection (RTCT) */
3024 { 0x1f, 0x0003 },
3025 { 0x01, 0x328a },
3026 { 0x1f, 0x0000 }
3027 };
3028
3029 rtl_apply_firmware(tp);
3030
3031 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3032
 3033 /* For 4-corner performance improvement */
3034 rtl_writephy(tp, 0x1f, 0x0005);
3035 rtl_writephy(tp, 0x05, 0x8b80);
3036 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3037 rtl_writephy(tp, 0x1f, 0x0000);
3038
3039 /* PHY auto speed down */
3040 rtl_writephy(tp, 0x1f, 0x0007);
3041 rtl_writephy(tp, 0x1e, 0x002d);
3042 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3043 rtl_writephy(tp, 0x1f, 0x0000);
3044 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3045
3046 /* Improve 10M EEE waveform */
3047 rtl_writephy(tp, 0x1f, 0x0005);
3048 rtl_writephy(tp, 0x05, 0x8b86);
3049 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3050 rtl_writephy(tp, 0x1f, 0x0000);
3051
3052 /* Improve 2-pair detection performance */
3053 rtl_writephy(tp, 0x1f, 0x0005);
3054 rtl_writephy(tp, 0x05, 0x8b85);
3055 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3056 rtl_writephy(tp, 0x1f, 0x0000);
3057}
3058
3059static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3060{
3061 rtl_apply_firmware(tp);
3062
 3063 /* For 4-corner performance improvement */
3064 rtl_writephy(tp, 0x1f, 0x0005);
3065 rtl_writephy(tp, 0x05, 0x8b80);
3066 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3067 rtl_writephy(tp, 0x1f, 0x0000);
3068
3069 /* PHY auto speed down */
3070 rtl_writephy(tp, 0x1f, 0x0007);
3071 rtl_writephy(tp, 0x1e, 0x002d);
3072 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3073 rtl_writephy(tp, 0x1f, 0x0000);
3074 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3075
3076 /* Improve 10M EEE waveform */
3077 rtl_writephy(tp, 0x1f, 0x0005);
3078 rtl_writephy(tp, 0x05, 0x8b86);
3079 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3080 rtl_writephy(tp, 0x1f, 0x0000);
3081}
3082
4da19633 3083static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2857ffb7 3084{
350f7596 3085 static const struct phy_reg phy_reg_init[] = {
2857ffb7
FR
3086 { 0x1f, 0x0003 },
3087 { 0x08, 0x441d },
3088 { 0x01, 0x9100 },
3089 { 0x1f, 0x0000 }
3090 };
3091
4da19633 3092 rtl_writephy(tp, 0x1f, 0x0000);
3093 rtl_patchphy(tp, 0x11, 1 << 12);
3094 rtl_patchphy(tp, 0x19, 1 << 13);
3095 rtl_patchphy(tp, 0x10, 1 << 15);
2857ffb7 3096
4da19633 3097 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2857ffb7
FR
3098}
3099
5a5e4443
HW
3100static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3101{
3102 static const struct phy_reg phy_reg_init[] = {
3103 { 0x1f, 0x0005 },
3104 { 0x1a, 0x0000 },
3105 { 0x1f, 0x0000 },
3106
3107 { 0x1f, 0x0004 },
3108 { 0x1c, 0x0000 },
3109 { 0x1f, 0x0000 },
3110
3111 { 0x1f, 0x0001 },
3112 { 0x15, 0x7701 },
3113 { 0x1f, 0x0000 }
3114 };
3115
 3116 /* Disable ALDPS (link-down power saving) before loading the ram code */
3117 rtl_writephy(tp, 0x1f, 0x0000);
3118 rtl_writephy(tp, 0x18, 0x0310);
3119 msleep(100);
3120
953a12cc 3121 rtl_apply_firmware(tp);
5a5e4443
HW
3122
3123 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3124}
3125
5615d9f1
FR
3126static void rtl_hw_phy_config(struct net_device *dev)
3127{
3128 struct rtl8169_private *tp = netdev_priv(dev);
5615d9f1
FR
3129
3130 rtl8169_print_mac_version(tp);
3131
3132 switch (tp->mac_version) {
3133 case RTL_GIGA_MAC_VER_01:
3134 break;
3135 case RTL_GIGA_MAC_VER_02:
3136 case RTL_GIGA_MAC_VER_03:
4da19633 3137 rtl8169s_hw_phy_config(tp);
5615d9f1
FR
3138 break;
3139 case RTL_GIGA_MAC_VER_04:
4da19633 3140 rtl8169sb_hw_phy_config(tp);
5615d9f1 3141 break;
2e955856 3142 case RTL_GIGA_MAC_VER_05:
4da19633 3143 rtl8169scd_hw_phy_config(tp);
2e955856 3144 break;
8c7006aa 3145 case RTL_GIGA_MAC_VER_06:
4da19633 3146 rtl8169sce_hw_phy_config(tp);
8c7006aa 3147 break;
2857ffb7
FR
3148 case RTL_GIGA_MAC_VER_07:
3149 case RTL_GIGA_MAC_VER_08:
3150 case RTL_GIGA_MAC_VER_09:
4da19633 3151 rtl8102e_hw_phy_config(tp);
2857ffb7 3152 break;
236b8082 3153 case RTL_GIGA_MAC_VER_11:
4da19633 3154 rtl8168bb_hw_phy_config(tp);
236b8082
FR
3155 break;
3156 case RTL_GIGA_MAC_VER_12:
4da19633 3157 rtl8168bef_hw_phy_config(tp);
236b8082
FR
3158 break;
3159 case RTL_GIGA_MAC_VER_17:
4da19633 3160 rtl8168bef_hw_phy_config(tp);
236b8082 3161 break;
867763c1 3162 case RTL_GIGA_MAC_VER_18:
4da19633 3163 rtl8168cp_1_hw_phy_config(tp);
867763c1
FR
3164 break;
3165 case RTL_GIGA_MAC_VER_19:
4da19633 3166 rtl8168c_1_hw_phy_config(tp);
867763c1 3167 break;
7da97ec9 3168 case RTL_GIGA_MAC_VER_20:
4da19633 3169 rtl8168c_2_hw_phy_config(tp);
7da97ec9 3170 break;
197ff761 3171 case RTL_GIGA_MAC_VER_21:
4da19633 3172 rtl8168c_3_hw_phy_config(tp);
197ff761 3173 break;
6fb07058 3174 case RTL_GIGA_MAC_VER_22:
4da19633 3175 rtl8168c_4_hw_phy_config(tp);
6fb07058 3176 break;
ef3386f0 3177 case RTL_GIGA_MAC_VER_23:
7f3e3d3a 3178 case RTL_GIGA_MAC_VER_24:
4da19633 3179 rtl8168cp_2_hw_phy_config(tp);
ef3386f0 3180 break;
5b538df9 3181 case RTL_GIGA_MAC_VER_25:
bca03d5f 3182 rtl8168d_1_hw_phy_config(tp);
daf9df6d 3183 break;
3184 case RTL_GIGA_MAC_VER_26:
bca03d5f 3185 rtl8168d_2_hw_phy_config(tp);
daf9df6d 3186 break;
3187 case RTL_GIGA_MAC_VER_27:
4da19633 3188 rtl8168d_3_hw_phy_config(tp);
5b538df9 3189 break;
e6de30d6 3190 case RTL_GIGA_MAC_VER_28:
3191 rtl8168d_4_hw_phy_config(tp);
3192 break;
5a5e4443
HW
3193 case RTL_GIGA_MAC_VER_29:
3194 case RTL_GIGA_MAC_VER_30:
3195 rtl8105e_hw_phy_config(tp);
3196 break;
cecb5fd7
FR
3197 case RTL_GIGA_MAC_VER_31:
3198 /* None. */
3199 break;
01dc7fec 3200 case RTL_GIGA_MAC_VER_32:
01dc7fec 3201 case RTL_GIGA_MAC_VER_33:
70090424
HW
3202 rtl8168e_1_hw_phy_config(tp);
3203 break;
3204 case RTL_GIGA_MAC_VER_34:
3205 rtl8168e_2_hw_phy_config(tp);
01dc7fec 3206 break;
c2218925
HW
3207 case RTL_GIGA_MAC_VER_35:
3208 rtl8168f_1_hw_phy_config(tp);
3209 break;
3210 case RTL_GIGA_MAC_VER_36:
3211 rtl8168f_2_hw_phy_config(tp);
3212 break;
ef3386f0 3213
5615d9f1
FR
3214 default:
3215 break;
3216 }
3217}
3218
1da177e4
LT
3219static void rtl8169_phy_timer(unsigned long __opaque)
3220{
3221 struct net_device *dev = (struct net_device *)__opaque;
3222 struct rtl8169_private *tp = netdev_priv(dev);
3223 struct timer_list *timer = &tp->timer;
3224 void __iomem *ioaddr = tp->mmio_addr;
3225 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3226
bcf0bf90 3227 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
1da177e4 3228
1da177e4
LT
3229 spin_lock_irq(&tp->lock);
3230
4da19633 3231 if (tp->phy_reset_pending(tp)) {
5b0384f4 3232 /*
1da177e4
LT
3233 * A busy loop could burn quite a few cycles on nowadays CPU.
3234 * Let's delay the execution of the timer for a few ticks.
3235 */
3236 timeout = HZ/10;
3237 goto out_mod_timer;
3238 }
3239
3240 if (tp->link_ok(ioaddr))
3241 goto out_unlock;
3242
bf82c189 3243 netif_warn(tp, link, dev, "PHY reset until link up\n");
1da177e4 3244
4da19633 3245 tp->phy_reset_enable(tp);
1da177e4
LT
3246
3247out_mod_timer:
3248 mod_timer(timer, jiffies + timeout);
3249out_unlock:
3250 spin_unlock_irq(&tp->lock);
3251}
3252
1da177e4
LT
3253#ifdef CONFIG_NET_POLL_CONTROLLER
3254/*
3255 * Polling 'interrupt' - used by things like netconsole to send skbs
3256 * without having to re-enable interrupts. It's not called while
3257 * the interrupt routine is executing.
3258 */
3259static void rtl8169_netpoll(struct net_device *dev)
3260{
3261 struct rtl8169_private *tp = netdev_priv(dev);
3262 struct pci_dev *pdev = tp->pci_dev;
3263
3264 disable_irq(pdev->irq);
7d12e780 3265 rtl8169_interrupt(pdev->irq, dev);
1da177e4
LT
3266 enable_irq(pdev->irq);
3267}
3268#endif
3269
3270static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3271 void __iomem *ioaddr)
3272{
3273 iounmap(ioaddr);
3274 pci_release_regions(pdev);
87aeec76 3275 pci_clear_mwi(pdev);
1da177e4
LT
3276 pci_disable_device(pdev);
3277 free_netdev(dev);
3278}
3279
bf793295
FR
3280static void rtl8169_phy_reset(struct net_device *dev,
3281 struct rtl8169_private *tp)
3282{
07d3f51f 3283 unsigned int i;
bf793295 3284
4da19633 3285 tp->phy_reset_enable(tp);
bf793295 3286 for (i = 0; i < 100; i++) {
4da19633 3287 if (!tp->phy_reset_pending(tp))
bf793295
FR
3288 return;
3289 msleep(1);
3290 }
bf82c189 3291 netif_err(tp, link, dev, "PHY reset failed\n");
bf793295
FR
3292}
3293
2544bfc0
FR
3294static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3295{
3296 void __iomem *ioaddr = tp->mmio_addr;
3297
3298 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3299 (RTL_R8(PHYstatus) & TBI_Enable);
3300}
3301
4ff96fa6
FR
3302static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3303{
3304 void __iomem *ioaddr = tp->mmio_addr;
4ff96fa6 3305
5615d9f1 3306 rtl_hw_phy_config(dev);
4ff96fa6 3307
77332894
MS
3308 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3309 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3310 RTL_W8(0x82, 0x01);
3311 }
4ff96fa6 3312
6dccd16b
FR
3313 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3314
3315 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3316 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
4ff96fa6 3317
bcf0bf90 3318 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
4ff96fa6
FR
3319 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3320 RTL_W8(0x82, 0x01);
3321 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
4da19633 3322 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
4ff96fa6
FR
3323 }
3324
bf793295
FR
3325 rtl8169_phy_reset(dev, tp);
3326
54405cde 3327 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
cecb5fd7
FR
3328 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3329 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3330 (tp->mii.supports_gmii ?
3331 ADVERTISED_1000baseT_Half |
3332 ADVERTISED_1000baseT_Full : 0));
4ff96fa6 3333
2544bfc0 3334 if (rtl_tbi_enabled(tp))
bf82c189 3335 netif_info(tp, link, dev, "TBI auto-negotiating\n");
4ff96fa6
FR
3336}
3337
773d2021
FR
3338static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3339{
3340 void __iomem *ioaddr = tp->mmio_addr;
3341 u32 high;
3342 u32 low;
3343
3344 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
3345 high = addr[4] | (addr[5] << 8);
3346
3347 spin_lock_irq(&tp->lock);
3348
3349 RTL_W8(Cfg9346, Cfg9346_Unlock);
908ba2bf 3350
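	/*
	 * The dummy reads below force the posted MMIO writes out to the chip
	 * so both halves of the address are latched before Cfg9346 is locked
	 * again.
	 */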
773d2021 3351 RTL_W32(MAC4, high);
908ba2bf 3352 RTL_R32(MAC4);
3353
78f1cd02 3354 RTL_W32(MAC0, low);
908ba2bf 3355 RTL_R32(MAC0);
3356
c28aa385 3357 if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
3358 const struct exgmac_reg e[] = {
3359 { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
3360 { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
3361 { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
3362 { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
3363 low >> 16 },
3364 };
3365
3366 rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
3367 }
3368
773d2021
FR
3369 RTL_W8(Cfg9346, Cfg9346_Lock);
3370
3371 spin_unlock_irq(&tp->lock);
3372}
3373
3374static int rtl_set_mac_address(struct net_device *dev, void *p)
3375{
3376 struct rtl8169_private *tp = netdev_priv(dev);
3377 struct sockaddr *addr = p;
3378
3379 if (!is_valid_ether_addr(addr->sa_data))
3380 return -EADDRNOTAVAIL;
3381
3382 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3383
3384 rtl_rar_set(tp, dev->dev_addr);
3385
3386 return 0;
3387}
3388
5f787a1a
FR
3389static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3390{
3391 struct rtl8169_private *tp = netdev_priv(dev);
3392 struct mii_ioctl_data *data = if_mii(ifr);
3393
8b4ab28d
FR
3394 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3395}
5f787a1a 3396
cecb5fd7
FR
3397static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3398 struct mii_ioctl_data *data, int cmd)
8b4ab28d 3399{
5f787a1a
FR
3400 switch (cmd) {
3401 case SIOCGMIIPHY:
3402 data->phy_id = 32; /* Internal PHY */
3403 return 0;
3404
3405 case SIOCGMIIREG:
4da19633 3406 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
5f787a1a
FR
3407 return 0;
3408
3409 case SIOCSMIIREG:
4da19633 3410 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
5f787a1a
FR
3411 return 0;
3412 }
3413 return -EOPNOTSUPP;
3414}
3415
8b4ab28d
FR
3416static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3417{
3418 return -EOPNOTSUPP;
3419}
3420
0e485150
FR
3421static const struct rtl_cfg_info {
3422 void (*hw_start)(struct net_device *);
3423 unsigned int region;
3424 unsigned int align;
3425 u16 intr_event;
3426 u16 napi_event;
ccdffb9a 3427 unsigned features;
f21b75e9 3428 u8 default_ver;
0e485150
FR
3429} rtl_cfg_infos [] = {
3430 [RTL_CFG_0] = {
3431 .hw_start = rtl_hw_start_8169,
3432 .region = 1,
e9f63f30 3433 .align = 0,
0e485150
FR
3434 .intr_event = SYSErr | LinkChg | RxOverflow |
3435 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
fbac58fc 3436 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
f21b75e9
JD
3437 .features = RTL_FEATURE_GMII,
3438 .default_ver = RTL_GIGA_MAC_VER_01,
0e485150
FR
3439 },
3440 [RTL_CFG_1] = {
3441 .hw_start = rtl_hw_start_8168,
3442 .region = 2,
3443 .align = 8,
53f57357 3444 .intr_event = SYSErr | LinkChg | RxOverflow |
0e485150 3445 TxErr | TxOK | RxOK | RxErr,
fbac58fc 3446 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
f21b75e9
JD
3447 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
3448 .default_ver = RTL_GIGA_MAC_VER_11,
0e485150
FR
3449 },
3450 [RTL_CFG_2] = {
3451 .hw_start = rtl_hw_start_8101,
3452 .region = 2,
3453 .align = 8,
3454 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
3455 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
fbac58fc 3456 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
f21b75e9
JD
3457 .features = RTL_FEATURE_MSI,
3458 .default_ver = RTL_GIGA_MAC_VER_13,
0e485150
FR
3459 }
3460};
3461
fbac58fc 3462/* Cfg9346_Unlock assumed. */
2ca6cf06 3463static unsigned rtl_try_msi(struct rtl8169_private *tp,
fbac58fc
FR
3464 const struct rtl_cfg_info *cfg)
3465{
2ca6cf06 3466 void __iomem *ioaddr = tp->mmio_addr;
fbac58fc
FR
3467 unsigned msi = 0;
3468 u8 cfg2;
3469
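	/*
	 * Only the old (<= RTL_GIGA_MAC_VER_06) chips appear to need the
	 * MSIEnable bit mirrored into Config2 (see the conditional write
	 * below); newer chips rely on the PCI MSI capability alone.
	 */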
3470 cfg2 = RTL_R8(Config2) & ~MSIEnable;
ccdffb9a 3471 if (cfg->features & RTL_FEATURE_MSI) {
2ca6cf06 3472 if (pci_enable_msi(tp->pci_dev)) {
3473 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
fbac58fc
FR
3474 } else {
3475 cfg2 |= MSIEnable;
3476 msi = RTL_FEATURE_MSI;
3477 }
3478 }
2ca6cf06 3479 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3480 RTL_W8(Config2, cfg2);
fbac58fc
FR
3481 return msi;
3482}
3483
3484static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3485{
3486 if (tp->features & RTL_FEATURE_MSI) {
3487 pci_disable_msi(pdev);
3488 tp->features &= ~RTL_FEATURE_MSI;
3489 }
3490}
3491
8b4ab28d
FR
3492static const struct net_device_ops rtl8169_netdev_ops = {
3493 .ndo_open = rtl8169_open,
3494 .ndo_stop = rtl8169_close,
3495 .ndo_get_stats = rtl8169_get_stats,
00829823 3496 .ndo_start_xmit = rtl8169_start_xmit,
8b4ab28d
FR
3497 .ndo_tx_timeout = rtl8169_tx_timeout,
3498 .ndo_validate_addr = eth_validate_addr,
3499 .ndo_change_mtu = rtl8169_change_mtu,
350fb32a
MM
3500 .ndo_fix_features = rtl8169_fix_features,
3501 .ndo_set_features = rtl8169_set_features,
8b4ab28d
FR
3502 .ndo_set_mac_address = rtl_set_mac_address,
3503 .ndo_do_ioctl = rtl8169_ioctl,
afc4b13d 3504 .ndo_set_rx_mode = rtl_set_rx_mode,
8b4ab28d
FR
3505#ifdef CONFIG_NET_POLL_CONTROLLER
3506 .ndo_poll_controller = rtl8169_netpoll,
3507#endif
3508
3509};
3510
c0e45c1c 3511static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
3512{
3513 struct mdio_ops *ops = &tp->mdio_ops;
3514
3515 switch (tp->mac_version) {
3516 case RTL_GIGA_MAC_VER_27:
3517 ops->write = r8168dp_1_mdio_write;
3518 ops->read = r8168dp_1_mdio_read;
3519 break;
e6de30d6 3520 case RTL_GIGA_MAC_VER_28:
4804b3b3 3521 case RTL_GIGA_MAC_VER_31:
e6de30d6 3522 ops->write = r8168dp_2_mdio_write;
3523 ops->read = r8168dp_2_mdio_read;
3524 break;
c0e45c1c 3525 default:
3526 ops->write = r8169_mdio_write;
3527 ops->read = r8169_mdio_read;
3528 break;
3529 }
3530}
3531
649b3b8c 3532static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3533{
3534 void __iomem *ioaddr = tp->mmio_addr;
3535
3536 switch (tp->mac_version) {
3537 case RTL_GIGA_MAC_VER_29:
3538 case RTL_GIGA_MAC_VER_30:
3539 case RTL_GIGA_MAC_VER_32:
3540 case RTL_GIGA_MAC_VER_33:
3541 case RTL_GIGA_MAC_VER_34:
3542 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3543 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3544 break;
3545 default:
3546 break;
3547 }
3548}
3549
3550static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3551{
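	/*
	 * With any Wake-on-LAN source armed, keep the PHY powered (BMCR
	 * cleared) and apply the per-chip receive filter quirk instead of
	 * letting the caller power the PLL down.
	 */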
3552 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3553 return false;
3554
3555 rtl_writephy(tp, 0x1f, 0x0000);
3556 rtl_writephy(tp, MII_BMCR, 0x0000);
3557
3558 rtl_wol_suspend_quirk(tp);
3559
3560 return true;
3561}
3562
065c27c1 3563static void r810x_phy_power_down(struct rtl8169_private *tp)
3564{
3565 rtl_writephy(tp, 0x1f, 0x0000);
3566 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3567}
3568
3569static void r810x_phy_power_up(struct rtl8169_private *tp)
3570{
3571 rtl_writephy(tp, 0x1f, 0x0000);
3572 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3573}
3574
3575static void r810x_pll_power_down(struct rtl8169_private *tp)
3576{
649b3b8c 3577 if (rtl_wol_pll_power_down(tp))
065c27c1 3578 return;
065c27c1 3579
3580 r810x_phy_power_down(tp);
3581}
3582
3583static void r810x_pll_power_up(struct rtl8169_private *tp)
3584{
3585 r810x_phy_power_up(tp);
3586}
3587
3588static void r8168_phy_power_up(struct rtl8169_private *tp)
3589{
3590 rtl_writephy(tp, 0x1f, 0x0000);
01dc7fec 3591 switch (tp->mac_version) {
3592 case RTL_GIGA_MAC_VER_11:
3593 case RTL_GIGA_MAC_VER_12:
3594 case RTL_GIGA_MAC_VER_17:
3595 case RTL_GIGA_MAC_VER_18:
3596 case RTL_GIGA_MAC_VER_19:
3597 case RTL_GIGA_MAC_VER_20:
3598 case RTL_GIGA_MAC_VER_21:
3599 case RTL_GIGA_MAC_VER_22:
3600 case RTL_GIGA_MAC_VER_23:
3601 case RTL_GIGA_MAC_VER_24:
3602 case RTL_GIGA_MAC_VER_25:
3603 case RTL_GIGA_MAC_VER_26:
3604 case RTL_GIGA_MAC_VER_27:
3605 case RTL_GIGA_MAC_VER_28:
3606 case RTL_GIGA_MAC_VER_31:
3607 rtl_writephy(tp, 0x0e, 0x0000);
3608 break;
3609 default:
3610 break;
3611 }
065c27c1 3612 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3613}
3614
3615static void r8168_phy_power_down(struct rtl8169_private *tp)
3616{
3617 rtl_writephy(tp, 0x1f, 0x0000);
01dc7fec 3618 switch (tp->mac_version) {
3619 case RTL_GIGA_MAC_VER_32:
3620 case RTL_GIGA_MAC_VER_33:
3621 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3622 break;
3623
3624 case RTL_GIGA_MAC_VER_11:
3625 case RTL_GIGA_MAC_VER_12:
3626 case RTL_GIGA_MAC_VER_17:
3627 case RTL_GIGA_MAC_VER_18:
3628 case RTL_GIGA_MAC_VER_19:
3629 case RTL_GIGA_MAC_VER_20:
3630 case RTL_GIGA_MAC_VER_21:
3631 case RTL_GIGA_MAC_VER_22:
3632 case RTL_GIGA_MAC_VER_23:
3633 case RTL_GIGA_MAC_VER_24:
3634 case RTL_GIGA_MAC_VER_25:
3635 case RTL_GIGA_MAC_VER_26:
3636 case RTL_GIGA_MAC_VER_27:
3637 case RTL_GIGA_MAC_VER_28:
3638 case RTL_GIGA_MAC_VER_31:
3639 rtl_writephy(tp, 0x0e, 0x0200);
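		/* fall through */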
3640 default:
3641 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3642 break;
3643 }
065c27c1 3644}
3645
3646static void r8168_pll_power_down(struct rtl8169_private *tp)
3647{
3648 void __iomem *ioaddr = tp->mmio_addr;
3649
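	/*
	 * The 8168DP family (VER_27/28/31) with active DASH remote management
	 * presumably keeps its firmware running off this power rail, so leave
	 * the PLL alone when r8168dp_check_dash() reports DASH enabled.
	 */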
cecb5fd7
FR
3650 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3651 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3652 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4804b3b3 3653 r8168dp_check_dash(tp)) {
065c27c1 3654 return;
5d2e1957 3655 }
065c27c1 3656
cecb5fd7
FR
3657 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
3658 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
065c27c1 3659 (RTL_R16(CPlusCmd) & ASF)) {
3660 return;
3661 }
3662
01dc7fec 3663 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3664 tp->mac_version == RTL_GIGA_MAC_VER_33)
3665 rtl_ephy_write(ioaddr, 0x19, 0xff64);
3666
649b3b8c 3667 if (rtl_wol_pll_power_down(tp))
065c27c1 3668 return;
065c27c1 3669
3670 r8168_phy_power_down(tp);
3671
3672 switch (tp->mac_version) {
3673 case RTL_GIGA_MAC_VER_25:
3674 case RTL_GIGA_MAC_VER_26:
5d2e1957
HW
3675 case RTL_GIGA_MAC_VER_27:
3676 case RTL_GIGA_MAC_VER_28:
4804b3b3 3677 case RTL_GIGA_MAC_VER_31:
01dc7fec 3678 case RTL_GIGA_MAC_VER_32:
3679 case RTL_GIGA_MAC_VER_33:
065c27c1 3680 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3681 break;
3682 }
3683}
3684
3685static void r8168_pll_power_up(struct rtl8169_private *tp)
3686{
3687 void __iomem *ioaddr = tp->mmio_addr;
3688
cecb5fd7
FR
3689 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3690 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3691 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4804b3b3 3692 r8168dp_check_dash(tp)) {
065c27c1 3693 return;
5d2e1957 3694 }
065c27c1 3695
3696 switch (tp->mac_version) {
3697 case RTL_GIGA_MAC_VER_25:
3698 case RTL_GIGA_MAC_VER_26:
5d2e1957
HW
3699 case RTL_GIGA_MAC_VER_27:
3700 case RTL_GIGA_MAC_VER_28:
4804b3b3 3701 case RTL_GIGA_MAC_VER_31:
01dc7fec 3702 case RTL_GIGA_MAC_VER_32:
3703 case RTL_GIGA_MAC_VER_33:
065c27c1 3704 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3705 break;
3706 }
3707
3708 r8168_phy_power_up(tp);
3709}
3710
d58d46b5
FR
3711static void rtl_generic_op(struct rtl8169_private *tp,
3712 void (*op)(struct rtl8169_private *))
065c27c1 3713{
3714 if (op)
3715 op(tp);
3716}
3717
3718static void rtl_pll_power_down(struct rtl8169_private *tp)
3719{
d58d46b5 3720 rtl_generic_op(tp, tp->pll_power_ops.down);
065c27c1 3721}
3722
3723static void rtl_pll_power_up(struct rtl8169_private *tp)
3724{
d58d46b5 3725 rtl_generic_op(tp, tp->pll_power_ops.up);
065c27c1 3726}
3727
3728static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3729{
3730 struct pll_power_ops *ops = &tp->pll_power_ops;
3731
3732 switch (tp->mac_version) {
3733 case RTL_GIGA_MAC_VER_07:
3734 case RTL_GIGA_MAC_VER_08:
3735 case RTL_GIGA_MAC_VER_09:
3736 case RTL_GIGA_MAC_VER_10:
3737 case RTL_GIGA_MAC_VER_16:
5a5e4443
HW
3738 case RTL_GIGA_MAC_VER_29:
3739 case RTL_GIGA_MAC_VER_30:
065c27c1 3740 ops->down = r810x_pll_power_down;
3741 ops->up = r810x_pll_power_up;
3742 break;
3743
3744 case RTL_GIGA_MAC_VER_11:
3745 case RTL_GIGA_MAC_VER_12:
3746 case RTL_GIGA_MAC_VER_17:
3747 case RTL_GIGA_MAC_VER_18:
3748 case RTL_GIGA_MAC_VER_19:
3749 case RTL_GIGA_MAC_VER_20:
3750 case RTL_GIGA_MAC_VER_21:
3751 case RTL_GIGA_MAC_VER_22:
3752 case RTL_GIGA_MAC_VER_23:
3753 case RTL_GIGA_MAC_VER_24:
3754 case RTL_GIGA_MAC_VER_25:
3755 case RTL_GIGA_MAC_VER_26:
3756 case RTL_GIGA_MAC_VER_27:
e6de30d6 3757 case RTL_GIGA_MAC_VER_28:
4804b3b3 3758 case RTL_GIGA_MAC_VER_31:
01dc7fec 3759 case RTL_GIGA_MAC_VER_32:
3760 case RTL_GIGA_MAC_VER_33:
70090424 3761 case RTL_GIGA_MAC_VER_34:
c2218925
HW
3762 case RTL_GIGA_MAC_VER_35:
3763 case RTL_GIGA_MAC_VER_36:
065c27c1 3764 ops->down = r8168_pll_power_down;
3765 ops->up = r8168_pll_power_up;
3766 break;
3767
3768 default:
3769 ops->down = NULL;
3770 ops->up = NULL;
3771 break;
3772 }
3773}
3774
e542a226
HW
3775static void rtl_init_rxcfg(struct rtl8169_private *tp)
3776{
3777 void __iomem *ioaddr = tp->mmio_addr;
3778
3779 switch (tp->mac_version) {
3780 case RTL_GIGA_MAC_VER_01:
3781 case RTL_GIGA_MAC_VER_02:
3782 case RTL_GIGA_MAC_VER_03:
3783 case RTL_GIGA_MAC_VER_04:
3784 case RTL_GIGA_MAC_VER_05:
3785 case RTL_GIGA_MAC_VER_06:
3786 case RTL_GIGA_MAC_VER_10:
3787 case RTL_GIGA_MAC_VER_11:
3788 case RTL_GIGA_MAC_VER_12:
3789 case RTL_GIGA_MAC_VER_13:
3790 case RTL_GIGA_MAC_VER_14:
3791 case RTL_GIGA_MAC_VER_15:
3792 case RTL_GIGA_MAC_VER_16:
3793 case RTL_GIGA_MAC_VER_17:
3794 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
3795 break;
3796 case RTL_GIGA_MAC_VER_18:
3797 case RTL_GIGA_MAC_VER_19:
3798 case RTL_GIGA_MAC_VER_20:
3799 case RTL_GIGA_MAC_VER_21:
3800 case RTL_GIGA_MAC_VER_22:
3801 case RTL_GIGA_MAC_VER_23:
3802 case RTL_GIGA_MAC_VER_24:
3803 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
3804 break;
3805 default:
3806 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
3807 break;
3808 }
3809}
3810
92fc43b4
HW
3811static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3812{
3813 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
3814}
3815
d58d46b5
FR
3816static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
3817{
3818 rtl_generic_op(tp, tp->jumbo_ops.enable);
3819}
3820
3821static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
3822{
3823 rtl_generic_op(tp, tp->jumbo_ops.disable);
3824}
3825
3826static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
3827{
3828 void __iomem *ioaddr = tp->mmio_addr;
3829
3830 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3831 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
3832 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
3833}
3834
3835static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
3836{
3837 void __iomem *ioaddr = tp->mmio_addr;
3838
3839 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3840 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
3841 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
3842}
3843
3844static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
3845{
3846 void __iomem *ioaddr = tp->mmio_addr;
3847
3848 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3849}
3850
3851static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
3852{
3853 void __iomem *ioaddr = tp->mmio_addr;
3854
3855 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3856}
3857
3858static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
3859{
3860 void __iomem *ioaddr = tp->mmio_addr;
d58d46b5
FR
3861
3862 RTL_W8(MaxTxPacketSize, 0x3f);
3863 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3864 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4512ff9f 3865 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
d58d46b5
FR
3866}
3867
3868static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
3869{
3870 void __iomem *ioaddr = tp->mmio_addr;
d58d46b5
FR
3871
3872 RTL_W8(MaxTxPacketSize, 0x0c);
3873 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3874 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4512ff9f 3875 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
d58d46b5
FR
3876}
3877
3878static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
3879{
3880 rtl_tx_performance_tweak(tp->pci_dev,
3881 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
3882}
3883
3884static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
3885{
3886 rtl_tx_performance_tweak(tp->pci_dev,
3887 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
3888}
3889
3890static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
3891{
3892 void __iomem *ioaddr = tp->mmio_addr;
3893
3894 r8168b_0_hw_jumbo_enable(tp);
3895
3896 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
3897}
3898
3899static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
3900{
3901 void __iomem *ioaddr = tp->mmio_addr;
3902
3903 r8168b_0_hw_jumbo_disable(tp);
3904
3905 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
3906}
3907
3908static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
3909{
3910 struct jumbo_ops *ops = &tp->jumbo_ops;
3911
3912 switch (tp->mac_version) {
3913 case RTL_GIGA_MAC_VER_11:
3914 ops->disable = r8168b_0_hw_jumbo_disable;
3915 ops->enable = r8168b_0_hw_jumbo_enable;
3916 break;
3917 case RTL_GIGA_MAC_VER_12:
3918 case RTL_GIGA_MAC_VER_17:
3919 ops->disable = r8168b_1_hw_jumbo_disable;
3920 ops->enable = r8168b_1_hw_jumbo_enable;
3921 break;
3922 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
3923 case RTL_GIGA_MAC_VER_19:
3924 case RTL_GIGA_MAC_VER_20:
3925 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
3926 case RTL_GIGA_MAC_VER_22:
3927 case RTL_GIGA_MAC_VER_23:
3928 case RTL_GIGA_MAC_VER_24:
3929 case RTL_GIGA_MAC_VER_25:
3930 case RTL_GIGA_MAC_VER_26:
3931 ops->disable = r8168c_hw_jumbo_disable;
3932 ops->enable = r8168c_hw_jumbo_enable;
3933 break;
3934 case RTL_GIGA_MAC_VER_27:
3935 case RTL_GIGA_MAC_VER_28:
3936 ops->disable = r8168dp_hw_jumbo_disable;
3937 ops->enable = r8168dp_hw_jumbo_enable;
3938 break;
3939 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
3940 case RTL_GIGA_MAC_VER_32:
3941 case RTL_GIGA_MAC_VER_33:
3942 case RTL_GIGA_MAC_VER_34:
3943 ops->disable = r8168e_hw_jumbo_disable;
3944 ops->enable = r8168e_hw_jumbo_enable;
3945 break;
3946
3947 /*
3948 * No action needed for jumbo frames with 8169.
3949 * No jumbo for 810x at all.
3950 */
3951 default:
3952 ops->disable = NULL;
3953 ops->enable = NULL;
3954 break;
3955 }
3956}
3957
6f43adc8
FR
3958static void rtl_hw_reset(struct rtl8169_private *tp)
3959{
3960 void __iomem *ioaddr = tp->mmio_addr;
3961 int i;
3962
3963 /* Soft reset the chip. */
3964 RTL_W8(ChipCmd, CmdReset);
3965
3966 /* Check that the chip has finished the reset. */
3967 for (i = 0; i < 100; i++) {
3968 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3969 break;
92fc43b4 3970 udelay(100);
6f43adc8
FR
3971 }
3972}
3973
1da177e4 3974static int __devinit
4ff96fa6 3975rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 3976{
0e485150
FR
3977 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
3978 const unsigned int region = cfg->region;
1da177e4 3979 struct rtl8169_private *tp;
ccdffb9a 3980 struct mii_if_info *mii;
4ff96fa6
FR
3981 struct net_device *dev;
3982 void __iomem *ioaddr;
2b7b4318 3983 int chipset, i;
07d3f51f 3984 int rc;
1da177e4 3985
4ff96fa6
FR
3986 if (netif_msg_drv(&debug)) {
3987 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
3988 MODULENAME, RTL8169_VERSION);
3989 }
1da177e4 3990
1da177e4 3991 dev = alloc_etherdev(sizeof (*tp));
4ff96fa6 3992 if (!dev) {
b57b7e5a 3993 if (netif_msg_drv(&debug))
9b91cf9d 3994 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
4ff96fa6
FR
3995 rc = -ENOMEM;
3996 goto out;
1da177e4
LT
3997 }
3998
1da177e4 3999 SET_NETDEV_DEV(dev, &pdev->dev);
8b4ab28d 4000 dev->netdev_ops = &rtl8169_netdev_ops;
1da177e4 4001 tp = netdev_priv(dev);
c4028958 4002 tp->dev = dev;
21e197f2 4003 tp->pci_dev = pdev;
b57b7e5a 4004 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1da177e4 4005
ccdffb9a
FR
4006 mii = &tp->mii;
4007 mii->dev = dev;
4008 mii->mdio_read = rtl_mdio_read;
4009 mii->mdio_write = rtl_mdio_write;
4010 mii->phy_id_mask = 0x1f;
4011 mii->reg_num_mask = 0x1f;
4012 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
4013
ba04c7c9
SG
4014 /* Disable ASPM completely, as it causes random devices to stop working
4015 * and full system hangs for some PCIe device users. */
4016 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
4017 PCIE_LINK_STATE_CLKPM);
4018
1da177e4
LT
4019 /* enable device (incl. PCI PM wakeup and hotplug setup) */
4020 rc = pci_enable_device(pdev);
b57b7e5a 4021 if (rc < 0) {
bf82c189 4022 netif_err(tp, probe, dev, "enable failure\n");
4ff96fa6 4023 goto err_out_free_dev_1;
1da177e4
LT
4024 }
4025
87aeec76 4026 if (pci_set_mwi(pdev) < 0)
4027 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
1da177e4 4028
1da177e4 4029 /* make sure PCI base addr 1 is MMIO */
bcf0bf90 4030 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
bf82c189
JP
4031 netif_err(tp, probe, dev,
4032 "region #%d not an MMIO resource, aborting\n",
4033 region);
1da177e4 4034 rc = -ENODEV;
87aeec76 4035 goto err_out_mwi_2;
1da177e4 4036 }
4ff96fa6 4037
1da177e4 4038 /* check for weird/broken PCI region reporting */
bcf0bf90 4039 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
bf82c189
JP
4040 netif_err(tp, probe, dev,
4041 "Invalid PCI region size(s), aborting\n");
1da177e4 4042 rc = -ENODEV;
87aeec76 4043 goto err_out_mwi_2;
1da177e4
LT
4044 }
4045
4046 rc = pci_request_regions(pdev, MODULENAME);
b57b7e5a 4047 if (rc < 0) {
bf82c189 4048 netif_err(tp, probe, dev, "could not request regions\n");
87aeec76 4049 goto err_out_mwi_2;
1da177e4
LT
4050 }
4051
d24e9aaf 4052 tp->cp_cmd = RxChkSum;
1da177e4
LT
4053
4054 if ((sizeof(dma_addr_t) > 4) &&
4300e8c7 4055 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
1da177e4
LT
4056 tp->cp_cmd |= PCIDAC;
4057 dev->features |= NETIF_F_HIGHDMA;
4058 } else {
284901a9 4059 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1da177e4 4060 if (rc < 0) {
bf82c189 4061 netif_err(tp, probe, dev, "DMA configuration failed\n");
87aeec76 4062 goto err_out_free_res_3;
1da177e4
LT
4063 }
4064 }
4065
1da177e4 4066 /* ioremap MMIO region */
bcf0bf90 4067 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
4ff96fa6 4068 if (!ioaddr) {
bf82c189 4069 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
1da177e4 4070 rc = -EIO;
87aeec76 4071 goto err_out_free_res_3;
1da177e4 4072 }
6f43adc8 4073 tp->mmio_addr = ioaddr;
1da177e4 4074
e44daade
JM
4075 if (!pci_is_pcie(pdev))
4076 netif_info(tp, probe, dev, "not PCI Express\n");
4300e8c7 4077
e542a226
HW
4078 /* Identify chip attached to board */
4079 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
4080
4081 rtl_init_rxcfg(tp);
4082
9085cdfa 4083 rtl_irq_disable(tp);
1da177e4 4084
6f43adc8 4085 rtl_hw_reset(tp);
1da177e4 4086
9085cdfa 4087 rtl_ack_events(tp, 0xffff);
d78ad8cb 4088
ca52efd5 4089 pci_set_master(pdev);
4090
7a8fc77b
FR
4091 /*
4092 * Pretend we are using VLANs; this bypasses a nasty bug where
4093 * interrupts stop flowing under high load on 8110SCd controllers.
4094 */
4095 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
4096 tp->cp_cmd |= RxVlan;
4097
c0e45c1c 4098 rtl_init_mdio_ops(tp);
065c27c1 4099 rtl_init_pll_power_ops(tp);
d58d46b5 4100 rtl_init_jumbo_ops(tp);
c0e45c1c 4101
1da177e4 4102 rtl8169_print_mac_version(tp);
1da177e4 4103
85bffe6c
FR
4104 chipset = tp->mac_version;
4105 tp->txd_version = rtl_chip_infos[chipset].txd_version;
1da177e4 4106
5d06a99f
FR
4107 RTL_W8(Cfg9346, Cfg9346_Unlock);
4108 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
4109 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
20037fa4
BP
4110 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
4111 tp->features |= RTL_FEATURE_WOL;
4112 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
4113 tp->features |= RTL_FEATURE_WOL;
2ca6cf06 4114 tp->features |= rtl_try_msi(tp, cfg);
5d06a99f
FR
4115 RTL_W8(Cfg9346, Cfg9346_Lock);
4116
2544bfc0 4117 if (rtl_tbi_enabled(tp)) {
1da177e4
LT
4118 tp->set_speed = rtl8169_set_speed_tbi;
4119 tp->get_settings = rtl8169_gset_tbi;
4120 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
4121 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
4122 tp->link_ok = rtl8169_tbi_link_ok;
8b4ab28d 4123 tp->do_ioctl = rtl_tbi_ioctl;
1da177e4
LT
4124 } else {
4125 tp->set_speed = rtl8169_set_speed_xmii;
4126 tp->get_settings = rtl8169_gset_xmii;
4127 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
4128 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
4129 tp->link_ok = rtl8169_xmii_link_ok;
8b4ab28d 4130 tp->do_ioctl = rtl_xmii_ioctl;
1da177e4
LT
4131 }
4132
df58ef51
FR
4133 spin_lock_init(&tp->lock);
4134
7bf6bf48 4135 /* Get MAC address */
6a3c910c 4136 for (i = 0; i < ETH_ALEN; i++)
1da177e4 4137 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6d6525b7 4138 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4 4139
1da177e4 4140 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
1da177e4
LT
4141 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
4142 dev->irq = pdev->irq;
4143 dev->base_addr = (unsigned long) ioaddr;
1da177e4 4144
bea3348e 4145 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
1da177e4 4146
350fb32a
MM
4147 /* Don't enable SG, IP_CSUM and TSO by default - they might not work
4148 * properly on all devices. */
4149 dev->features |= NETIF_F_RXCSUM |
4150 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4151
4152 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
4153 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4154 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
4155 NETIF_F_HIGHDMA;
4156
4157 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
4158 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
4159 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
1da177e4
LT
4160
4161 tp->intr_mask = 0xffff;
0e485150
FR
4162 tp->hw_start = cfg->hw_start;
4163 tp->intr_event = cfg->intr_event;
4164 tp->napi_event = cfg->napi_event;
1da177e4 4165
e03f33af
FR
4166 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
4167 ~(RxBOVF | RxFOVF) : ~0;
4168
2efa53f3
FR
4169 init_timer(&tp->timer);
4170 tp->timer.data = (unsigned long) dev;
4171 tp->timer.function = rtl8169_phy_timer;
4172
b6ffd97f 4173 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
953a12cc 4174
1da177e4 4175 rc = register_netdev(dev);
4ff96fa6 4176 if (rc < 0)
87aeec76 4177 goto err_out_msi_4;
1da177e4
LT
4178
4179 pci_set_drvdata(pdev, dev);
4180
bf82c189 4181 netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
85bffe6c 4182 rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
bf82c189 4183 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
d58d46b5
FR
4184 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
4185 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
4186 "tx checksumming: %s]\n",
4187 rtl_chip_infos[chipset].jumbo_max,
4188 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
4189 }
1da177e4 4190
cecb5fd7
FR
4191 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4192 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4193 tp->mac_version == RTL_GIGA_MAC_VER_31) {
b646d900 4194 rtl8168_driver_start(tp);
e6de30d6 4195 }
b646d900 4196
8b76ab39 4197 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
1da177e4 4198
f3ec4f87
AS
4199 if (pci_dev_run_wake(pdev))
4200 pm_runtime_put_noidle(&pdev->dev);
e1759441 4201
0d672e9f
IV
4202 netif_carrier_off(dev);
4203
4ff96fa6
FR
4204out:
4205 return rc;
1da177e4 4206
87aeec76 4207err_out_msi_4:
fbac58fc 4208 rtl_disable_msi(pdev, tp);
4ff96fa6 4209 iounmap(ioaddr);
87aeec76 4210err_out_free_res_3:
4ff96fa6 4211 pci_release_regions(pdev);
87aeec76 4212err_out_mwi_2:
4ff96fa6 4213 pci_clear_mwi(pdev);
4ff96fa6
FR
4214 pci_disable_device(pdev);
4215err_out_free_dev_1:
4216 free_netdev(dev);
4217 goto out;
1da177e4
LT
4218}
4219
07d3f51f 4220static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
1da177e4
LT
4221{
4222 struct net_device *dev = pci_get_drvdata(pdev);
4223 struct rtl8169_private *tp = netdev_priv(dev);
4224
cecb5fd7
FR
4225 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4226 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4227 tp->mac_version == RTL_GIGA_MAC_VER_31) {
b646d900 4228 rtl8168_driver_stop(tp);
e6de30d6 4229 }
b646d900 4230
4422bcd4 4231 cancel_work_sync(&tp->wk.work);
eb2a021c 4232
1da177e4 4233 unregister_netdev(dev);
cc098dc7 4234
953a12cc
FR
4235 rtl_release_firmware(tp);
4236
f3ec4f87
AS
4237 if (pci_dev_run_wake(pdev))
4238 pm_runtime_get_noresume(&pdev->dev);
e1759441 4239
cc098dc7
IV
4240 /* restore original MAC address */
4241 rtl_rar_set(tp, dev->perm_addr);
4242
fbac58fc 4243 rtl_disable_msi(pdev, tp);
1da177e4
LT
4244 rtl8169_release_board(pdev, dev, tp->mmio_addr);
4245 pci_set_drvdata(pdev, NULL);
4246}
4247
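/* Fetch and validate the chip-specific firmware patch once; if anything fails, warn and keep running without it. */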
b6ffd97f 4248static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
953a12cc 4249{
b6ffd97f
FR
4250 struct rtl_fw *rtl_fw;
4251 const char *name;
4252 int rc = -ENOMEM;
953a12cc 4253
b6ffd97f
FR
4254 name = rtl_lookup_firmware_name(tp);
4255 if (!name)
4256 goto out_no_firmware;
953a12cc 4257
b6ffd97f
FR
4258 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4259 if (!rtl_fw)
4260 goto err_warn;
31bd204f 4261
b6ffd97f
FR
4262 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4263 if (rc < 0)
4264 goto err_free;
4265
fd112f2e
FR
4266 rc = rtl_check_firmware(tp, rtl_fw);
4267 if (rc < 0)
4268 goto err_release_firmware;
4269
b6ffd97f
FR
4270 tp->rtl_fw = rtl_fw;
4271out:
4272 return;
4273
fd112f2e
FR
4274err_release_firmware:
4275 release_firmware(rtl_fw->fw);
b6ffd97f
FR
4276err_free:
4277 kfree(rtl_fw);
4278err_warn:
4279 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4280 name, rc);
4281out_no_firmware:
4282 tp->rtl_fw = NULL;
4283 goto out;
4284}
4285
4286static void rtl_request_firmware(struct rtl8169_private *tp)
4287{
4288 if (IS_ERR(tp->rtl_fw))
4289 rtl_request_uncached_firmware(tp);
953a12cc
FR
4290}
4291
4422bcd4
FR
4292static void rtl_task(struct work_struct *);
4293
1da177e4
LT
4294static int rtl8169_open(struct net_device *dev)
4295{
4296 struct rtl8169_private *tp = netdev_priv(dev);
eee3a96c 4297 void __iomem *ioaddr = tp->mmio_addr;
1da177e4 4298 struct pci_dev *pdev = tp->pci_dev;
99f252b0 4299 int retval = -ENOMEM;
1da177e4 4300
e1759441 4301 pm_runtime_get_sync(&pdev->dev);
1da177e4 4302
1da177e4
LT
4303 /*
4304 * Rx and Tx descriptors need 256-byte alignment.
82553bb6 4305 * dma_alloc_coherent provides more.
1da177e4 4306 */
82553bb6
SG
4307 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
4308 &tp->TxPhyAddr, GFP_KERNEL);
1da177e4 4309 if (!tp->TxDescArray)
e1759441 4310 goto err_pm_runtime_put;
1da177e4 4311
82553bb6
SG
4312 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
4313 &tp->RxPhyAddr, GFP_KERNEL);
1da177e4 4314 if (!tp->RxDescArray)
99f252b0 4315 goto err_free_tx_0;
1da177e4
LT
4316
4317 retval = rtl8169_init_ring(dev);
4318 if (retval < 0)
99f252b0 4319 goto err_free_rx_1;
1da177e4 4320
4422bcd4 4321 INIT_WORK(&tp->wk.work, rtl_task);
1da177e4 4322
99f252b0
FR
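	/* Presumably orders the ring and work-queue setup above against the IRQ handler registered below. */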
4323 smp_mb();
4324
953a12cc
FR
4325 rtl_request_firmware(tp);
4326
fbac58fc
FR
4327 retval = request_irq(dev->irq, rtl8169_interrupt,
4328 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
99f252b0
FR
4329 dev->name, dev);
4330 if (retval < 0)
953a12cc 4331 goto err_release_fw_2;
99f252b0 4332
bea3348e 4333 napi_enable(&tp->napi);
bea3348e 4334
eee3a96c 4335 rtl8169_init_phy(dev, tp);
4336
350fb32a 4337 rtl8169_set_features(dev, dev->features);
eee3a96c 4338
065c27c1 4339 rtl_pll_power_up(tp);
4340
07ce4064 4341 rtl_hw_start(dev);
1da177e4 4342
e1759441
RW
4343 tp->saved_wolopts = 0;
4344 pm_runtime_put_noidle(&pdev->dev);
4345
eee3a96c 4346 rtl8169_check_link_status(dev, tp, ioaddr);
1da177e4
LT
4347out:
4348 return retval;
4349
953a12cc
FR
4350err_release_fw_2:
4351 rtl_release_firmware(tp);
99f252b0
FR
4352 rtl8169_rx_clear(tp);
4353err_free_rx_1:
82553bb6
SG
4354 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4355 tp->RxPhyAddr);
e1759441 4356 tp->RxDescArray = NULL;
99f252b0 4357err_free_tx_0:
82553bb6
SG
4358 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4359 tp->TxPhyAddr);
e1759441
RW
4360 tp->TxDescArray = NULL;
4361err_pm_runtime_put:
4362 pm_runtime_put_noidle(&pdev->dev);
1da177e4
LT
4363 goto out;
4364}
4365
92fc43b4
HW
4366static void rtl_rx_close(struct rtl8169_private *tp)
4367{
4368 void __iomem *ioaddr = tp->mmio_addr;
92fc43b4 4369
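	/* Stop the receiver from accepting any new packets. */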
1687b566 4370 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
92fc43b4
HW
4371}
4372
e6de30d6 4373static void rtl8169_hw_reset(struct rtl8169_private *tp)
1da177e4 4374{
e6de30d6 4375 void __iomem *ioaddr = tp->mmio_addr;
4376
1da177e4 4377 /* Disable interrupts */
811fd301 4378 rtl8169_irq_mask_and_ack(tp);
1da177e4 4379
92fc43b4
HW
4380 rtl_rx_close(tp);
4381
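	/* Give the chip a chance to drain its Tx queue; the wait strategy depends on the chip family. */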
5d2e1957 4382 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4804b3b3 4383 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4384 tp->mac_version == RTL_GIGA_MAC_VER_31) {
e6de30d6 4385 while (RTL_R8(TxPoll) & NPQ)
4386 udelay(20);
c2218925
HW
4387 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4388 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4389 tp->mac_version == RTL_GIGA_MAC_VER_36) {
c2b0c1e7 4390 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
70090424
HW
4391 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
4392 udelay(100);
92fc43b4
HW
4393 } else {
4394 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4395 udelay(100);
e6de30d6 4396 }
4397
92fc43b4 4398 rtl_hw_reset(tp);
1da177e4
LT
4399}
4400
7f796d83 4401static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
9cb427b6
FR
4402{
4403 void __iomem *ioaddr = tp->mmio_addr;
9cb427b6
FR
4404
4405 /* Set DMA burst size and Interframe Gap Time */
4406 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4407 (InterFrameGap << TxInterFrameGapShift));
4408}
4409
07ce4064 4410static void rtl_hw_start(struct net_device *dev)
1da177e4
LT
4411{
4412 struct rtl8169_private *tp = netdev_priv(dev);
1da177e4 4413
07ce4064
FR
4414 tp->hw_start(dev);
4415
3e990ff5
FR
4416 rtl_irq_enable(tp, tp->intr_event);
4417
07ce4064
FR
4418 netif_start_queue(dev);
4419}
4420
7f796d83
FR
4421static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4422 void __iomem *ioaddr)
4423{
4424 /*
4425 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4426 * register to be written before TxDescAddrLow to work.
4427 * Switching from MMIO to I/O access fixes the issue as well.
4428 */
4429 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
284901a9 4430 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
7f796d83 4431 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
284901a9 4432 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
7f796d83
FR
4433}
4434
4435static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4436{
4437 u16 cmd;
4438
4439 cmd = RTL_R16(CPlusCmd);
4440 RTL_W16(CPlusCmd, cmd);
4441 return cmd;
4442}
4443
fdd7b4c3 4444static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
7f796d83
FR
4445{
4446 /* Low hurts. Let's disable the filtering. */
207d6e87 4447 RTL_W16(RxMaxSize, rx_buf_sz + 1);
7f796d83
FR
4448}
4449
6dccd16b
FR
4450static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4451{
3744100e 4452 static const struct rtl_cfg2_info {
6dccd16b
FR
4453 u32 mac_version;
4454 u32 clk;
4455 u32 val;
4456 } cfg2_info [] = {
4457 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4458 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4459 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4460 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
3744100e
FR
4461 };
4462 const struct rtl_cfg2_info *p = cfg2_info;
6dccd16b
FR
4463 unsigned int i;
4464 u32 clk;
4465
4466 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
cadf1855 4467 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
6dccd16b
FR
4468 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4469 RTL_W32(0x7c, p->val);
4470 break;
4471 }
4472 }
4473}
4474
07ce4064
FR
4475static void rtl_hw_start_8169(struct net_device *dev)
4476{
4477 struct rtl8169_private *tp = netdev_priv(dev);
4478 void __iomem *ioaddr = tp->mmio_addr;
4479 struct pci_dev *pdev = tp->pci_dev;
07ce4064 4480
9cb427b6
FR
4481 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4482 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4483 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4484 }
4485
1da177e4 4486 RTL_W8(Cfg9346, Cfg9346_Unlock);
cecb5fd7
FR
4487 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4488 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4489 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4490 tp->mac_version == RTL_GIGA_MAC_VER_04)
9cb427b6
FR
4491 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4492
e542a226
HW
4493 rtl_init_rxcfg(tp);
4494
f0298f81 4495 RTL_W8(EarlyTxThres, NoEarlyTx);
1da177e4 4496
6f0333b8 4497 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
1da177e4 4498
cecb5fd7
FR
4499 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4500 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4501 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4502 tp->mac_version == RTL_GIGA_MAC_VER_04)
c946b304 4503 rtl_set_rx_tx_config_registers(tp);
1da177e4 4504
7f796d83 4505 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
1da177e4 4506
cecb5fd7
FR
4507 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4508 tp->mac_version == RTL_GIGA_MAC_VER_03) {
06fa7358 4509 dprintk("Set MAC Reg C+CR Offset 0xE0. "
1da177e4 4510 "Bit-3 and bit-14 MUST be 1\n");
bcf0bf90 4511 tp->cp_cmd |= (1 << 14);
1da177e4
LT
4512 }
4513
bcf0bf90
FR
4514 RTL_W16(CPlusCmd, tp->cp_cmd);
4515
6dccd16b
FR
4516 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4517
1da177e4
LT
4518 /*
4519 * Undocumented corner. Supposedly:
4520 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4521 */
4522 RTL_W16(IntrMitigate, 0x0000);
4523
7f796d83 4524 rtl_set_rx_tx_desc_registers(tp, ioaddr);
9cb427b6 4525
cecb5fd7
FR
4526 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4527 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4528 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4529 tp->mac_version != RTL_GIGA_MAC_VER_04) {
c946b304
FR
4530 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4531 rtl_set_rx_tx_config_registers(tp);
4532 }
4533
1da177e4 4534 RTL_W8(Cfg9346, Cfg9346_Lock);
b518fa8e
FR
4535
4536 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4537 RTL_R8(IntrMask);
1da177e4
LT
4538
4539 RTL_W32(RxMissed, 0);
4540
07ce4064 4541 rtl_set_rx_mode(dev);
1da177e4
LT
4542
4543 /* no early-rx interrupts */
4544 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
07ce4064 4545}
1da177e4 4546
650e8d5d 4547static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
dacf8154
FR
4548{
4549 u32 csi;
4550
4551 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
650e8d5d 4552 rtl_csi_write(ioaddr, 0x070c, csi | bits);
4553}
4554
e6de30d6 4555static void rtl_csi_access_enable_1(void __iomem *ioaddr)
4556{
4557 rtl_csi_access_enable(ioaddr, 0x17000000);
4558}
4559
650e8d5d 4560static void rtl_csi_access_enable_2(void __iomem *ioaddr)
4561{
4562 rtl_csi_access_enable(ioaddr, 0x27000000);
dacf8154
FR
4563}
4564
4565struct ephy_info {
4566 unsigned int offset;
4567 u16 mask;
4568 u16 bits;
4569};
4570
350f7596 4571static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
dacf8154
FR
4572{
4573 u16 w;
4574
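	/* For each entry: read the EPHY register, clear the masked bits, set the new bits, write it back. */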
4575 while (len-- > 0) {
4576 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
4577 rtl_ephy_write(ioaddr, e->offset, w);
4578 e++;
4579 }
4580}
4581
b726e493
FR
4582static void rtl_disable_clock_request(struct pci_dev *pdev)
4583{
e44daade 4584 int cap = pci_pcie_cap(pdev);
b726e493
FR
4585
4586 if (cap) {
4587 u16 ctl;
4588
4589 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4590 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
4591 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4592 }
4593}
4594
e6de30d6 4595static void rtl_enable_clock_request(struct pci_dev *pdev)
4596{
e44daade 4597 int cap = pci_pcie_cap(pdev);
e6de30d6 4598
4599 if (cap) {
4600 u16 ctl;
4601
4602 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4603 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4604 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4605 }
4606}
4607
b726e493
FR
4608#define R8168_CPCMD_QUIRK_MASK (\
4609 EnableBist | \
4610 Mac_dbgo_oe | \
4611 Force_half_dup | \
4612 Force_rxflow_en | \
4613 Force_txflow_en | \
4614 Cxpl_dbg_sel | \
4615 ASF | \
4616 PktCntrDisable | \
4617 Mac_dbgo_sel)
4618
219a1e9d
FR
4619static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
4620{
b726e493
FR
4621 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4622
4623 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4624
2e68ae44
FR
4625 rtl_tx_performance_tweak(pdev,
4626 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
219a1e9d
FR
4627}
4628
4629static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
4630{
4631 rtl_hw_start_8168bb(ioaddr, pdev);
b726e493 4632
f0298f81 4633 RTL_W8(MaxTxPacketSize, TxPacketMax);
b726e493
FR
4634
4635 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
219a1e9d
FR
4636}
4637
4638static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
4639{
b726e493
FR
4640 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4641
4642 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4643
219a1e9d 4644 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
b726e493
FR
4645
4646 rtl_disable_clock_request(pdev);
4647
4648 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
219a1e9d
FR
4649}
4650
ef3386f0 4651static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
219a1e9d 4652{
350f7596 4653 static const struct ephy_info e_info_8168cp[] = {
b726e493
FR
4654 { 0x01, 0, 0x0001 },
4655 { 0x02, 0x0800, 0x1000 },
4656 { 0x03, 0, 0x0042 },
4657 { 0x06, 0x0080, 0x0000 },
4658 { 0x07, 0, 0x2000 }
4659 };
4660
650e8d5d 4661 rtl_csi_access_enable_2(ioaddr);
b726e493
FR
4662
4663 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4664
219a1e9d
FR
4665 __rtl_hw_start_8168cp(ioaddr, pdev);
4666}
4667
ef3386f0
FR
4668static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
4669{
650e8d5d 4670 rtl_csi_access_enable_2(ioaddr);
ef3386f0
FR
4671
4672 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4673
4674 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4675
4676 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4677}
4678
7f3e3d3a
FR
4679static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
4680{
650e8d5d 4681 rtl_csi_access_enable_2(ioaddr);
7f3e3d3a
FR
4682
4683 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4684
4685 /* Magic. */
4686 RTL_W8(DBG_REG, 0x20);
4687
f0298f81 4688 RTL_W8(MaxTxPacketSize, TxPacketMax);
7f3e3d3a
FR
4689
4690 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4691
4692 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4693}
4694
219a1e9d
FR
4695static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
4696{
350f7596 4697 static const struct ephy_info e_info_8168c_1[] = {
b726e493
FR
4698 { 0x02, 0x0800, 0x1000 },
4699 { 0x03, 0, 0x0002 },
4700 { 0x06, 0x0080, 0x0000 }
4701 };
4702
650e8d5d 4703 rtl_csi_access_enable_2(ioaddr);
b726e493
FR
4704
4705 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4706
4707 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4708
219a1e9d
FR
4709 __rtl_hw_start_8168cp(ioaddr, pdev);
4710}
4711
4712static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
4713{
350f7596 4714 static const struct ephy_info e_info_8168c_2[] = {
b726e493
FR
4715 { 0x01, 0, 0x0001 },
4716 { 0x03, 0x0400, 0x0220 }
4717 };
4718
650e8d5d 4719 rtl_csi_access_enable_2(ioaddr);
b726e493
FR
4720
4721 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4722
219a1e9d
FR
4723 __rtl_hw_start_8168cp(ioaddr, pdev);
4724}
4725
197ff761
FR
4726static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
4727{
4728 rtl_hw_start_8168c_2(ioaddr, pdev);
4729}
4730
6fb07058
FR
4731static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
4732{
650e8d5d 4733 rtl_csi_access_enable_2(ioaddr);
6fb07058
FR
4734
4735 __rtl_hw_start_8168cp(ioaddr, pdev);
4736}
4737
5b538df9
FR
4738static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
4739{
650e8d5d 4740 rtl_csi_access_enable_2(ioaddr);
5b538df9
FR
4741
4742 rtl_disable_clock_request(pdev);
4743
f0298f81 4744 RTL_W8(MaxTxPacketSize, TxPacketMax);
5b538df9
FR
4745
4746 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4747
4748 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4749}
4750
4804b3b3 4751static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
4752{
4753 rtl_csi_access_enable_1(ioaddr);
4754
4755 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4756
4757 RTL_W8(MaxTxPacketSize, TxPacketMax);
4758
4759 rtl_disable_clock_request(pdev);
4760}
4761
e6de30d6 4762static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4763{
4764 static const struct ephy_info e_info_8168d_4[] = {
4765 { 0x0b, ~0, 0x48 },
4766 { 0x19, 0x20, 0x50 },
4767 { 0x0c, ~0, 0x20 }
4768 };
4769 int i;
4770
4771 rtl_csi_access_enable_1(ioaddr);
4772
4773 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4774
4775 RTL_W8(MaxTxPacketSize, TxPacketMax);
4776
4777 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4778 const struct ephy_info *e = e_info_8168d_4 + i;
4779 u16 w;
4780
4781 w = rtl_ephy_read(ioaddr, e->offset);
4782 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
4783 }
4784
4785 rtl_enable_clock_request(pdev);
4786}
4787
70090424 4788static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
01dc7fec 4789{
70090424 4790 static const struct ephy_info e_info_8168e_1[] = {
01dc7fec 4791 { 0x00, 0x0200, 0x0100 },
4792 { 0x00, 0x0000, 0x0004 },
4793 { 0x06, 0x0002, 0x0001 },
4794 { 0x06, 0x0000, 0x0030 },
4795 { 0x07, 0x0000, 0x2000 },
4796 { 0x00, 0x0000, 0x0020 },
4797 { 0x03, 0x5800, 0x2000 },
4798 { 0x03, 0x0000, 0x0001 },
4799 { 0x01, 0x0800, 0x1000 },
4800 { 0x07, 0x0000, 0x4000 },
4801 { 0x1e, 0x0000, 0x2000 },
4802 { 0x19, 0xffff, 0xfe6c },
4803 { 0x0a, 0x0000, 0x0040 }
4804 };
4805
4806 rtl_csi_access_enable_2(ioaddr);
4807
70090424 4808 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
01dc7fec 4809
4810 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4811
4812 RTL_W8(MaxTxPacketSize, TxPacketMax);
4813
4814 rtl_disable_clock_request(pdev);
4815
4816 /* Reset tx FIFO pointer */
cecb5fd7
FR
4817 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4818 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
01dc7fec 4819
cecb5fd7 4820 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
01dc7fec 4821}
4822
70090424
HW
4823static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4824{
4825 static const struct ephy_info e_info_8168e_2[] = {
4826 { 0x09, 0x0000, 0x0080 },
4827 { 0x19, 0x0000, 0x0224 }
4828 };
4829
4830 rtl_csi_access_enable_1(ioaddr);
4831
4832 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4833
4834 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4835
4836 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4837 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4838 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4839 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4840 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4841 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
4842 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4843 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4844 ERIAR_EXGMAC);
4845
3090bd9a 4846 RTL_W8(MaxTxPacketSize, EarlySize);
70090424
HW
4847
4848 rtl_disable_clock_request(pdev);
4849
4850 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4851 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4852
4853 /* Adjust EEE LED frequency */
4854 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4855
4856 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4857 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4858 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4859}
4860
c2218925
HW
4861static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4862{
4863 static const struct ephy_info e_info_8168f_1[] = {
4864 { 0x06, 0x00c0, 0x0020 },
4865 { 0x08, 0x0001, 0x0002 },
4866 { 0x09, 0x0000, 0x0080 },
4867 { 0x19, 0x0000, 0x0224 }
4868 };
4869
4870 rtl_csi_access_enable_1(ioaddr);
4871
4872 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4873
4874 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4875
4876 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4877 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4878 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4879 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4880 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
4881 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
4882 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4883 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4884 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4885 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4886 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4887 ERIAR_EXGMAC);
4888
4889 RTL_W8(MaxTxPacketSize, EarlySize);
4890
4891 rtl_disable_clock_request(pdev);
4892
4893 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4894 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4895
4896 /* Adjust EEE LED frequency */
4897 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4898
4899 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4900 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4901 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4902}
4903
07ce4064
FR
4904static void rtl_hw_start_8168(struct net_device *dev)
4905{
2dd99530
FR
4906 struct rtl8169_private *tp = netdev_priv(dev);
4907 void __iomem *ioaddr = tp->mmio_addr;
0e485150 4908 struct pci_dev *pdev = tp->pci_dev;
2dd99530
FR
4909
4910 RTL_W8(Cfg9346, Cfg9346_Unlock);
4911
f0298f81 4912 RTL_W8(MaxTxPacketSize, TxPacketMax);
2dd99530 4913
6f0333b8 4914 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
2dd99530 4915
0e485150 4916 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
2dd99530
FR
4917
4918 RTL_W16(CPlusCmd, tp->cp_cmd);
4919
0e485150 4920 RTL_W16(IntrMitigate, 0x5151);
2dd99530 4921
0e485150 4922 /* Workaround for RxFIFO overflow. */
811fd301 4923 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
0e485150
FR
4924 tp->intr_event |= RxFIFOOver | PCSTimeout;
4925 tp->intr_event &= ~RxOverflow;
4926 }
4927
4928 rtl_set_rx_tx_desc_registers(tp, ioaddr);
2dd99530 4929
b8363901
FR
4930 rtl_set_rx_mode(dev);
4931
4932 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4933 (InterFrameGap << TxInterFrameGapShift));
2dd99530
FR
4934
4935 RTL_R8(IntrMask);
4936
219a1e9d
FR
4937 switch (tp->mac_version) {
4938 case RTL_GIGA_MAC_VER_11:
4939 rtl_hw_start_8168bb(ioaddr, pdev);
4804b3b3 4940 break;
219a1e9d
FR
4941
4942 case RTL_GIGA_MAC_VER_12:
4943 case RTL_GIGA_MAC_VER_17:
4944 rtl_hw_start_8168bef(ioaddr, pdev);
4804b3b3 4945 break;
219a1e9d
FR
4946
4947 case RTL_GIGA_MAC_VER_18:
ef3386f0 4948 rtl_hw_start_8168cp_1(ioaddr, pdev);
4804b3b3 4949 break;
219a1e9d
FR
4950
4951 case RTL_GIGA_MAC_VER_19:
4952 rtl_hw_start_8168c_1(ioaddr, pdev);
4804b3b3 4953 break;
219a1e9d
FR
4954
4955 case RTL_GIGA_MAC_VER_20:
4956 rtl_hw_start_8168c_2(ioaddr, pdev);
4804b3b3 4957 break;
219a1e9d 4958
197ff761
FR
4959 case RTL_GIGA_MAC_VER_21:
4960 rtl_hw_start_8168c_3(ioaddr, pdev);
4804b3b3 4961 break;
197ff761 4962
6fb07058
FR
4963 case RTL_GIGA_MAC_VER_22:
4964 rtl_hw_start_8168c_4(ioaddr, pdev);
4804b3b3 4965 break;
6fb07058 4966
ef3386f0
FR
4967 case RTL_GIGA_MAC_VER_23:
4968 rtl_hw_start_8168cp_2(ioaddr, pdev);
4804b3b3 4969 break;
ef3386f0 4970
7f3e3d3a
FR
4971 case RTL_GIGA_MAC_VER_24:
4972 rtl_hw_start_8168cp_3(ioaddr, pdev);
4804b3b3 4973 break;
7f3e3d3a 4974
5b538df9 4975 case RTL_GIGA_MAC_VER_25:
daf9df6d 4976 case RTL_GIGA_MAC_VER_26:
4977 case RTL_GIGA_MAC_VER_27:
5b538df9 4978 rtl_hw_start_8168d(ioaddr, pdev);
4804b3b3 4979 break;
5b538df9 4980
e6de30d6 4981 case RTL_GIGA_MAC_VER_28:
4982 rtl_hw_start_8168d_4(ioaddr, pdev);
4804b3b3 4983 break;
cecb5fd7 4984
4804b3b3 4985 case RTL_GIGA_MAC_VER_31:
4986 rtl_hw_start_8168dp(ioaddr, pdev);
4987 break;
4988
01dc7fec 4989 case RTL_GIGA_MAC_VER_32:
4990 case RTL_GIGA_MAC_VER_33:
70090424
HW
4991 rtl_hw_start_8168e_1(ioaddr, pdev);
4992 break;
4993 case RTL_GIGA_MAC_VER_34:
4994 rtl_hw_start_8168e_2(ioaddr, pdev);
01dc7fec 4995 break;
e6de30d6 4996
c2218925
HW
4997 case RTL_GIGA_MAC_VER_35:
4998 case RTL_GIGA_MAC_VER_36:
4999 rtl_hw_start_8168f_1(ioaddr, pdev);
5000 break;
5001
219a1e9d
FR
5002 default:
5003 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5004 dev->name, tp->mac_version);
4804b3b3 5005 break;
219a1e9d 5006 }
2dd99530 5007
0e485150
FR
5008 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5009
b8363901
FR
5010 RTL_W8(Cfg9346, Cfg9346_Lock);
5011
2dd99530 5012 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
07ce4064 5013}
1da177e4 5014
2857ffb7
FR
5015#define R810X_CPCMD_QUIRK_MASK (\
5016 EnableBist | \
5017 Mac_dbgo_oe | \
5018 Force_half_dup | \
5edcc537 5019 Force_rxflow_en | \
2857ffb7
FR
5020 Force_txflow_en | \
5021 Cxpl_dbg_sel | \
5022 ASF | \
5023 PktCntrDisable | \
d24e9aaf 5024 Mac_dbgo_sel)
2857ffb7
FR
5025
5026static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
5027{
350f7596 5028 static const struct ephy_info e_info_8102e_1[] = {
2857ffb7
FR
5029 { 0x01, 0, 0x6e65 },
5030 { 0x02, 0, 0x091f },
5031 { 0x03, 0, 0xc2f9 },
5032 { 0x06, 0, 0xafb5 },
5033 { 0x07, 0, 0x0e00 },
5034 { 0x19, 0, 0xec80 },
5035 { 0x01, 0, 0x2e65 },
5036 { 0x01, 0, 0x6e65 }
5037 };
5038 u8 cfg1;
5039
650e8d5d 5040 rtl_csi_access_enable_2(ioaddr);
2857ffb7
FR
5041
5042 RTL_W8(DBG_REG, FIX_NAK_1);
5043
5044 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5045
5046 RTL_W8(Config1,
5047 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5048 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5049
5050 cfg1 = RTL_R8(Config1);
5051 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5052 RTL_W8(Config1, cfg1 & ~LEDS0);
5053
2857ffb7
FR
5054 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
5055}
5056
5057static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
5058{
650e8d5d 5059 rtl_csi_access_enable_2(ioaddr);
2857ffb7
FR
5060
5061 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5062
5063 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5064 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
2857ffb7
FR
5065}
5066
5067static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
5068{
5069 rtl_hw_start_8102e_2(ioaddr, pdev);
5070
5071 rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
5072}
5073
5a5e4443
HW
5074static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
5075{
5076 static const struct ephy_info e_info_8105e_1[] = {
5077 { 0x07, 0, 0x4000 },
5078 { 0x19, 0, 0x0200 },
5079 { 0x19, 0, 0x0020 },
5080 { 0x1e, 0, 0x2000 },
5081 { 0x03, 0, 0x0001 },
5082 { 0x19, 0, 0x0100 },
5083 { 0x19, 0, 0x0004 },
5084 { 0x0a, 0, 0x0020 }
5085 };
5086
cecb5fd7 5087 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5a5e4443
HW
5088 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5089
cecb5fd7 5090 /* Disable Early Tally Counter */
5a5e4443
HW
5091 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5092
5093 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
4f6b00e5 5094 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5a5e4443
HW
5095
5096 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5097}
5098
5099static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
5100{
5101 rtl_hw_start_8105e_1(ioaddr, pdev);
5102 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
5103}
5104
07ce4064
FR
5105static void rtl_hw_start_8101(struct net_device *dev)
5106{
cdf1a608
FR
5107 struct rtl8169_private *tp = netdev_priv(dev);
5108 void __iomem *ioaddr = tp->mmio_addr;
5109 struct pci_dev *pdev = tp->pci_dev;
5110
811fd301 5111 if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
5112 tp->intr_event &= ~RxFIFOOver;
5113 tp->napi_event &= ~RxFIFOOver;
5114 }
5115
cecb5fd7
FR
5116 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5117 tp->mac_version == RTL_GIGA_MAC_VER_16) {
e44daade 5118 int cap = pci_pcie_cap(pdev);
9c14ceaf
FR
5119
5120 if (cap) {
5121 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
5122 PCI_EXP_DEVCTL_NOSNOOP_EN);
5123 }
cdf1a608
FR
5124 }
5125
d24e9aaf
HW
5126 RTL_W8(Cfg9346, Cfg9346_Unlock);
5127
2857ffb7
FR
5128 switch (tp->mac_version) {
5129 case RTL_GIGA_MAC_VER_07:
5130 rtl_hw_start_8102e_1(ioaddr, pdev);
5131 break;
5132
5133 case RTL_GIGA_MAC_VER_08:
5134 rtl_hw_start_8102e_3(ioaddr, pdev);
5135 break;
5136
5137 case RTL_GIGA_MAC_VER_09:
5138 rtl_hw_start_8102e_2(ioaddr, pdev);
5139 break;
5a5e4443
HW
5140
5141 case RTL_GIGA_MAC_VER_29:
5142 rtl_hw_start_8105e_1(ioaddr, pdev);
5143 break;
5144 case RTL_GIGA_MAC_VER_30:
5145 rtl_hw_start_8105e_2(ioaddr, pdev);
5146 break;
cdf1a608
FR
5147 }
5148
d24e9aaf 5149 RTL_W8(Cfg9346, Cfg9346_Lock);
cdf1a608 5150
f0298f81 5151 RTL_W8(MaxTxPacketSize, TxPacketMax);
cdf1a608 5152
6f0333b8 5153 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
cdf1a608 5154
d24e9aaf 5155 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
cdf1a608
FR
5156 RTL_W16(CPlusCmd, tp->cp_cmd);
5157
5158 RTL_W16(IntrMitigate, 0x0000);
5159
5160 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5161
5162 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5163 rtl_set_rx_tx_config_registers(tp);
5164
cdf1a608
FR
5165 RTL_R8(IntrMask);
5166
cdf1a608
FR
5167 rtl_set_rx_mode(dev);
5168
5169 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
1da177e4
LT
5170}
5171
5172static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5173{
d58d46b5
FR
5174 struct rtl8169_private *tp = netdev_priv(dev);
5175
5176 if (new_mtu < ETH_ZLEN ||
5177 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
1da177e4
LT
5178 return -EINVAL;
5179
d58d46b5
FR
5180 if (new_mtu > ETH_DATA_LEN)
5181 rtl_hw_jumbo_enable(tp);
5182 else
5183 rtl_hw_jumbo_disable(tp);
5184
1da177e4 5185 dev->mtu = new_mtu;
350fb32a
MM
5186 netdev_update_features(dev);
5187
323bb685 5188 return 0;
1da177e4
LT
5189}
5190
5191static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5192{
95e0918d 5193 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
1da177e4
LT
5194 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
5195}
5196
6f0333b8
ED
5197static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5198 void **data_buff, struct RxDesc *desc)
1da177e4 5199{
48addcc9 5200 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
231aee63 5201 DMA_FROM_DEVICE);
48addcc9 5202
6f0333b8
ED
5203 kfree(*data_buff);
5204 *data_buff = NULL;
1da177e4
LT
5205 rtl8169_make_unusable_by_asic(desc);
5206}
5207
5208static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5209{
5210 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5211
5212 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
5213}
5214
5215static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5216 u32 rx_buf_sz)
5217{
5218 desc->addr = cpu_to_le64(mapping);
5219 wmb();
5220 rtl8169_mark_to_asic(desc, rx_buf_sz);
5221}
5222
6f0333b8
ED
5223static inline void *rtl8169_align(void *data)
5224{
5225 return (void *)ALIGN((long)data, 16);
5226}
5227
0ecbe1ca
SG
5228static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5229 struct RxDesc *desc)
1da177e4 5230{
6f0333b8 5231 void *data;
1da177e4 5232 dma_addr_t mapping;
48addcc9 5233 struct device *d = &tp->pci_dev->dev;
0ecbe1ca 5234 struct net_device *dev = tp->dev;
6f0333b8 5235 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
1da177e4 5236
6f0333b8
ED
5237 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5238 if (!data)
5239 return NULL;
e9f63f30 5240
6f0333b8
ED
5241 if (rtl8169_align(data) != data) {
5242 kfree(data);
5243 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5244 if (!data)
5245 return NULL;
5246 }
3eafe507 5247
48addcc9 5248 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
231aee63 5249 DMA_FROM_DEVICE);
d827d86b
SG
5250 if (unlikely(dma_mapping_error(d, mapping))) {
5251 if (net_ratelimit())
5252 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
3eafe507 5253 goto err_out;
d827d86b 5254 }
1da177e4
LT
5255
5256 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
6f0333b8 5257 return data;
3eafe507
SG
5258
5259err_out:
5260 kfree(data);
5261 return NULL;
1da177e4
LT
5262}
5263
5264static void rtl8169_rx_clear(struct rtl8169_private *tp)
5265{
07d3f51f 5266 unsigned int i;
1da177e4
LT
5267
5268 for (i = 0; i < NUM_RX_DESC; i++) {
6f0333b8
ED
5269 if (tp->Rx_databuff[i]) {
5270 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
1da177e4
LT
5271 tp->RxDescArray + i);
5272 }
5273 }
5274}
5275
0ecbe1ca 5276static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
1da177e4 5277{
0ecbe1ca
SG
5278 desc->opts1 |= cpu_to_le32(RingEnd);
5279}
5b0384f4 5280
0ecbe1ca
SG
5281static int rtl8169_rx_fill(struct rtl8169_private *tp)
5282{
5283 unsigned int i;
1da177e4 5284
0ecbe1ca
SG
5285 for (i = 0; i < NUM_RX_DESC; i++) {
5286 void *data;
4ae47c2d 5287
6f0333b8 5288 if (tp->Rx_databuff[i])
1da177e4 5289 continue;
bcf0bf90 5290
0ecbe1ca 5291 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
6f0333b8
ED
5292 if (!data) {
5293 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
0ecbe1ca 5294 goto err_out;
6f0333b8
ED
5295 }
5296 tp->Rx_databuff[i] = data;
1da177e4 5297 }
1da177e4 5298
0ecbe1ca
SG
5299 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5300 return 0;
5301
5302err_out:
5303 rtl8169_rx_clear(tp);
5304 return -ENOMEM;
1da177e4
LT
5305}
5306
1da177e4
LT
5307static int rtl8169_init_ring(struct net_device *dev)
5308{
5309 struct rtl8169_private *tp = netdev_priv(dev);
5310
5311 rtl8169_init_ring_indexes(tp);
5312
5313 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
6f0333b8 5314 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
1da177e4 5315
0ecbe1ca 5316 return rtl8169_rx_fill(tp);
1da177e4
LT
5317}
5318
48addcc9 5319static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
1da177e4
LT
5320 struct TxDesc *desc)
5321{
5322 unsigned int len = tx_skb->len;
5323
48addcc9
SG
5324 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
5325
1da177e4
LT
5326 desc->opts1 = 0x00;
5327 desc->opts2 = 0x00;
5328 desc->addr = 0x00;
5329 tx_skb->len = 0;
5330}
5331
3eafe507
SG
5332static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5333 unsigned int n)
1da177e4
LT
5334{
5335 unsigned int i;
5336
3eafe507
SG
5337 for (i = 0; i < n; i++) {
5338 unsigned int entry = (start + i) % NUM_TX_DESC;
1da177e4
LT
5339 struct ring_info *tx_skb = tp->tx_skb + entry;
5340 unsigned int len = tx_skb->len;
5341
5342 if (len) {
5343 struct sk_buff *skb = tx_skb->skb;
5344
48addcc9 5345 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
1da177e4
LT
5346 tp->TxDescArray + entry);
5347 if (skb) {
cac4b22f 5348 tp->dev->stats.tx_dropped++;
1da177e4
LT
5349 dev_kfree_skb(skb);
5350 tx_skb->skb = NULL;
5351 }
1da177e4
LT
5352 }
5353 }
3eafe507
SG
5354}
5355
5356static void rtl8169_tx_clear(struct rtl8169_private *tp)
5357{
5358 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
1da177e4
LT
5359 tp->cur_tx = tp->dirty_tx = 0;
5360}
5361
4422bcd4 5362static void rtl8169_schedule_work(struct net_device *dev)
1da177e4
LT
5363{
5364 struct rtl8169_private *tp = netdev_priv(dev);
5365
4422bcd4 5366 schedule_work(&tp->wk.work);
1da177e4
LT
5367}
5368
5369static void rtl8169_wait_for_quiescence(struct net_device *dev)
5370{
5371 struct rtl8169_private *tp = netdev_priv(dev);
5372 void __iomem *ioaddr = tp->mmio_addr;
5373
5374 synchronize_irq(dev->irq);
5375
5376 /* Wait for any pending NAPI task to complete */
bea3348e 5377 napi_disable(&tp->napi);
1da177e4 5378
811fd301 5379 rtl8169_irq_mask_and_ack(tp);
1da177e4 5380
d1d08d12
DM
5381 tp->intr_mask = 0xffff;
5382 RTL_W16(IntrMask, tp->intr_event);
bea3348e 5383 napi_enable(&tp->napi);
1da177e4
LT
5384}
5385
4422bcd4 5386static void rtl_reset_work(struct rtl8169_private *tp)
1da177e4 5387{
c4028958 5388 struct net_device *dev = tp->dev;
56de414c 5389 int i;
1da177e4 5390
eb2a021c
FR
5391 rtnl_lock();
5392
1da177e4 5393 if (!netif_running(dev))
eb2a021c 5394 goto out_unlock;
1da177e4 5395
c7c2c39b 5396 rtl8169_hw_reset(tp);
5397
1da177e4
LT
5398 rtl8169_wait_for_quiescence(dev);
5399
56de414c
FR
5400 for (i = 0; i < NUM_RX_DESC; i++)
5401 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5402
1da177e4 5403 rtl8169_tx_clear(tp);
c7c2c39b 5404 rtl8169_init_ring_indexes(tp);
1da177e4 5405
56de414c
FR
5406 rtl_hw_start(dev);
5407 netif_wake_queue(dev);
5408 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
eb2a021c
FR
5409
5410out_unlock:
5411 rtnl_unlock();
1da177e4
LT
5412}
5413
5414static void rtl8169_tx_timeout(struct net_device *dev)
5415{
4422bcd4 5416 rtl8169_schedule_work(dev);
1da177e4
LT
5417}
5418
5419static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
2b7b4318 5420 u32 *opts)
1da177e4
LT
5421{
5422 struct skb_shared_info *info = skb_shinfo(skb);
5423 unsigned int cur_frag, entry;
a6343afb 5424 struct TxDesc * uninitialized_var(txd);
48addcc9 5425 struct device *d = &tp->pci_dev->dev;
1da177e4
LT
5426
5427 entry = tp->cur_tx;
5428 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
9e903e08 5429 const skb_frag_t *frag = info->frags + cur_frag;
1da177e4
LT
5430 dma_addr_t mapping;
5431 u32 status, len;
5432 void *addr;
5433
5434 entry = (entry + 1) % NUM_TX_DESC;
5435
5436 txd = tp->TxDescArray + entry;
9e903e08 5437 len = skb_frag_size(frag);
929f6189 5438 addr = skb_frag_address(frag);
48addcc9 5439 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
d827d86b
SG
5440 if (unlikely(dma_mapping_error(d, mapping))) {
5441 if (net_ratelimit())
5442 netif_err(tp, drv, tp->dev,
5443 "Failed to map TX fragments DMA!\n");
3eafe507 5444 goto err_out;
d827d86b 5445 }
1da177e4 5446
cecb5fd7 5447 /* Anti gcc 2.95.3 bugware (sic) */
2b7b4318
FR
5448 status = opts[0] | len |
5449 (RingEnd * !((entry + 1) % NUM_TX_DESC));
1da177e4
LT
5450
5451 txd->opts1 = cpu_to_le32(status);
2b7b4318 5452 txd->opts2 = cpu_to_le32(opts[1]);
1da177e4
LT
5453 txd->addr = cpu_to_le64(mapping);
5454
5455 tp->tx_skb[entry].len = len;
5456 }
5457
5458 if (cur_frag) {
5459 tp->tx_skb[entry].skb = skb;
5460 txd->opts1 |= cpu_to_le32(LastFrag);
5461 }
5462
5463 return cur_frag;
3eafe507
SG
5464
5465err_out:
5466 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
5467 return -EIO;
1da177e4
LT
5468}
5469
2b7b4318
FR
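/* Fill in the offload bits: large send if a GSO size is set, otherwise per-protocol checksum offload for TCP/UDP. */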
5470static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5471 struct sk_buff *skb, u32 *opts)
1da177e4 5472{
2b7b4318 5473 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
350fb32a 5474 u32 mss = skb_shinfo(skb)->gso_size;
2b7b4318 5475 int offset = info->opts_offset;
350fb32a 5476
2b7b4318
FR
5477 if (mss) {
5478 opts[0] |= TD_LSO;
5479 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5480 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
eddc9ec5 5481 const struct iphdr *ip = ip_hdr(skb);
1da177e4
LT
5482
5483 if (ip->protocol == IPPROTO_TCP)
2b7b4318 5484 opts[offset] |= info->checksum.tcp;
1da177e4 5485 else if (ip->protocol == IPPROTO_UDP)
2b7b4318
FR
5486 opts[offset] |= info->checksum.udp;
5487 else
5488 WARN_ON_ONCE(1);
1da177e4 5489 }
1da177e4
LT
5490}
5491
61357325
SH
5492static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5493 struct net_device *dev)
1da177e4
LT
5494{
5495 struct rtl8169_private *tp = netdev_priv(dev);
3eafe507 5496 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
1da177e4
LT
5497 struct TxDesc *txd = tp->TxDescArray + entry;
5498 void __iomem *ioaddr = tp->mmio_addr;
48addcc9 5499 struct device *d = &tp->pci_dev->dev;
1da177e4
LT
5500 dma_addr_t mapping;
5501 u32 status, len;
2b7b4318 5502 u32 opts[2];
3eafe507 5503 int frags;
5b0384f4 5504
1da177e4 5505 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
bf82c189 5506 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
3eafe507 5507 goto err_stop_0;
1da177e4
LT
5508 }
5509
5510 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
3eafe507
SG
5511 goto err_stop_0;
5512
5513 len = skb_headlen(skb);
48addcc9 5514 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
d827d86b
SG
5515 if (unlikely(dma_mapping_error(d, mapping))) {
5516 if (net_ratelimit())
5517 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
3eafe507 5518 goto err_dma_0;
d827d86b 5519 }
3eafe507
SG
5520
5521 tp->tx_skb[entry].len = len;
5522 txd->addr = cpu_to_le64(mapping);
1da177e4 5523
2b7b4318
FR
5524 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
5525 opts[0] = DescOwn;
1da177e4 5526
2b7b4318
FR
5527 rtl8169_tso_csum(tp, skb, opts);
5528
5529 frags = rtl8169_xmit_frags(tp, skb, opts);
3eafe507
SG
5530 if (frags < 0)
5531 goto err_dma_1;
5532 else if (frags)
2b7b4318 5533 opts[0] |= FirstFrag;
3eafe507 5534 else {
2b7b4318 5535 opts[0] |= FirstFrag | LastFrag;
1da177e4
LT
5536 tp->tx_skb[entry].skb = skb;
5537 }
5538
2b7b4318
FR
5539 txd->opts2 = cpu_to_le32(opts[1]);
5540
1da177e4
LT
5541 wmb();
5542
cecb5fd7 5543 /* Anti gcc 2.95.3 bugware (sic) */
2b7b4318 5544 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
1da177e4
LT
5545 txd->opts1 = cpu_to_le32(status);
5546
1da177e4
LT
5547 tp->cur_tx += frags + 1;
5548
4c020a96 5549 wmb();
1da177e4 5550
cecb5fd7 5551 RTL_W8(TxPoll, NPQ);
1da177e4
LT
5552
5553 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
5554 netif_stop_queue(dev);
1e874e04 5555 smp_mb();
1da177e4
LT
5556 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
5557 netif_wake_queue(dev);
5558 }
5559
61357325 5560 return NETDEV_TX_OK;
1da177e4 5561
3eafe507 5562err_dma_1:
48addcc9 5563 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
3eafe507
SG
5564err_dma_0:
5565 dev_kfree_skb(skb);
5566 dev->stats.tx_dropped++;
5567 return NETDEV_TX_OK;
5568
5569err_stop_0:
1da177e4 5570 netif_stop_queue(dev);
cebf8cc7 5571 dev->stats.tx_dropped++;
61357325 5572 return NETDEV_TX_BUSY;
1da177e4
LT
5573}
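
/*
 * Illustrative sketch (not part of the driver): the barrier pairing relied
 * upon above, reduced to its essentials.  All descriptor fields must be
 * visible before DescOwn is handed to the chip, the completed descriptor
 * must be visible before the TxPoll doorbell, and the smp_mb() after
 * netif_stop_queue() pairs with the smp_mb() in rtl8169_tx_interrupt() so
 * a queue stop cannot race with the final completion.  The helper name is
 * invented for this sketch only.
 */
static inline void sketch_tx_publish(struct TxDesc *txd, dma_addr_t mapping,
				     u32 opts0, u32 opts1, u32 len,
				     bool ring_end)
{
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(opts1);

	/* Order the descriptor body before the ownership transfer. */
	wmb();

	txd->opts1 = cpu_to_le32(opts0 | len |
				 (ring_end ? RingEnd : 0) | DescOwn);
	/* A second wmb() then orders the descriptor before the doorbell. */
}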
5574
5575static void rtl8169_pcierr_interrupt(struct net_device *dev)
5576{
5577 struct rtl8169_private *tp = netdev_priv(dev);
5578 struct pci_dev *pdev = tp->pci_dev;
1da177e4
LT
5579 u16 pci_status, pci_cmd;
5580
5581 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5582 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5583
bf82c189
JP
5584 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5585 pci_cmd, pci_status);
1da177e4
LT
5586
5587 /*
 5588 * The recovery sequence below admits a very elaborate explanation:
5589 * - it seems to work;
d03902b8
FR
5590 * - I did not see what else could be done;
5591 * - it makes iop3xx happy.
1da177e4
LT
5592 *
5593 * Feel free to adjust to your needs.
5594 */
a27993f3 5595 if (pdev->broken_parity_status)
d03902b8
FR
5596 pci_cmd &= ~PCI_COMMAND_PARITY;
5597 else
5598 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5599
5600 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1da177e4
LT
5601
5602 pci_write_config_word(pdev, PCI_STATUS,
5603 pci_status & (PCI_STATUS_DETECTED_PARITY |
5604 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5605 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5606
5607 /* The infamous DAC f*ckup only happens at boot time */
5608 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
e6de30d6 5609 void __iomem *ioaddr = tp->mmio_addr;
5610
bf82c189 5611 netif_info(tp, intr, dev, "disabling PCI DAC\n");
1da177e4
LT
5612 tp->cp_cmd &= ~PCIDAC;
5613 RTL_W16(CPlusCmd, tp->cp_cmd);
5614 dev->features &= ~NETIF_F_HIGHDMA;
1da177e4
LT
5615 }
5616
e6de30d6 5617 rtl8169_hw_reset(tp);
d03902b8 5618
4422bcd4 5619 rtl8169_schedule_work(dev);
1da177e4
LT
5620}
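
/*
 * Illustrative sketch (not part of the driver): the PCI_STATUS error bits
 * are write-one-to-clear, which is why the handler above writes back the
 * error bits it has just read.  The helper name is invented for this
 * sketch only.
 */
static inline void sketch_pci_clear_errors(struct pci_dev *pdev,
					   u16 pci_status)
{
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
			      PCI_STATUS_SIG_SYSTEM_ERROR |
			      PCI_STATUS_REC_MASTER_ABORT |
			      PCI_STATUS_REC_TARGET_ABORT |
			      PCI_STATUS_SIG_TARGET_ABORT));
}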
5621
07d3f51f
FR
5622static void rtl8169_tx_interrupt(struct net_device *dev,
5623 struct rtl8169_private *tp,
5624 void __iomem *ioaddr)
1da177e4
LT
5625{
5626 unsigned int dirty_tx, tx_left;
5627
1da177e4
LT
5628 dirty_tx = tp->dirty_tx;
5629 smp_rmb();
5630 tx_left = tp->cur_tx - dirty_tx;
5631
5632 while (tx_left > 0) {
5633 unsigned int entry = dirty_tx % NUM_TX_DESC;
5634 struct ring_info *tx_skb = tp->tx_skb + entry;
1da177e4
LT
5635 u32 status;
5636
5637 rmb();
5638 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
5639 if (status & DescOwn)
5640 break;
5641
48addcc9
SG
5642 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5643 tp->TxDescArray + entry);
1da177e4 5644 if (status & LastFrag) {
cac4b22f
SG
5645 dev->stats.tx_packets++;
5646 dev->stats.tx_bytes += tx_skb->skb->len;
87433bfc 5647 dev_kfree_skb(tx_skb->skb);
1da177e4
LT
5648 tx_skb->skb = NULL;
5649 }
5650 dirty_tx++;
5651 tx_left--;
5652 }
5653
5654 if (tp->dirty_tx != dirty_tx) {
5655 tp->dirty_tx = dirty_tx;
1e874e04 5656 smp_mb();
1da177e4
LT
5657 if (netif_queue_stopped(dev) &&
5658 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
5659 netif_wake_queue(dev);
5660 }
d78ae2dc
FR
5661 /*
5662 * 8168 hack: TxPoll requests are lost when the Tx packets are
5663 * too close. Let's kick an extra TxPoll request when a burst
5664 * of start_xmit activity is detected (if it is not detected,
5665 * it is slow enough). -- FR
5666 */
d78ae2dc
FR
5667 if (tp->cur_tx != dirty_tx)
5668 RTL_W8(TxPoll, NPQ);
1da177e4
LT
5669 }
5670}
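
/*
 * Illustrative sketch (not part of the driver): the consumer side of the
 * stop/wake handshake above.  dirty_tx must be published before the queue
 * state is re-read, otherwise a concurrent start_xmit() can stop the queue
 * just after the last completion and never be woken again.  The helper
 * name is invented for this sketch only.
 */
static inline void sketch_tx_wake_check(struct net_device *dev,
					struct rtl8169_private *tp)
{
	smp_mb();	/* pairs with the smp_mb() in rtl8169_start_xmit() */

	if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
		netif_wake_queue(dev);
}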
5671
126fa4b9
FR
5672static inline int rtl8169_fragmented_frame(u32 status)
5673{
5674 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5675}
5676
adea1ac7 5677static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
1da177e4 5678{
1da177e4
LT
5679 u32 status = opts1 & RxProtoMask;
5680
5681 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
d5d3ebe3 5682 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
1da177e4
LT
5683 skb->ip_summed = CHECKSUM_UNNECESSARY;
5684 else
bc8acf2c 5685 skb_checksum_none_assert(skb);
1da177e4
LT
5686}
5687
6f0333b8
ED
5688static struct sk_buff *rtl8169_try_rx_copy(void *data,
5689 struct rtl8169_private *tp,
5690 int pkt_size,
5691 dma_addr_t addr)
1da177e4 5692{
b449655f 5693 struct sk_buff *skb;
48addcc9 5694 struct device *d = &tp->pci_dev->dev;
b449655f 5695
6f0333b8 5696 data = rtl8169_align(data);
48addcc9 5697 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6f0333b8
ED
5698 prefetch(data);
5699 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
5700 if (skb)
5701 memcpy(skb->data, data, pkt_size);
48addcc9
SG
5702 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
5703
6f0333b8 5704 return skb;
1da177e4
LT
5705}
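
/*
 * Illustrative sketch (not part of the driver): the receive path above
 * recycles the DMA buffer rather than the skb, so the copy has to be
 * bracketed by dma_sync_single_for_cpu()/dma_sync_single_for_device()
 * while the mapping remains owned by the device.  The helper name is
 * invented for this sketch only.
 */
static inline void sketch_rx_copybreak(struct device *d, dma_addr_t addr,
				       const void *src, void *dst,
				       int pkt_size)
{
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	memcpy(dst, src, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
}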
5706
07d3f51f
FR
5707static int rtl8169_rx_interrupt(struct net_device *dev,
5708 struct rtl8169_private *tp,
bea3348e 5709 void __iomem *ioaddr, u32 budget)
1da177e4
LT
5710{
5711 unsigned int cur_rx, rx_left;
6f0333b8 5712 unsigned int count;
1da177e4 5713
1da177e4
LT
5714 cur_rx = tp->cur_rx;
5715 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
865c652d 5716 rx_left = min(rx_left, budget);
1da177e4 5717
4dcb7d33 5718 for (; rx_left > 0; rx_left--, cur_rx++) {
1da177e4 5719 unsigned int entry = cur_rx % NUM_RX_DESC;
126fa4b9 5720 struct RxDesc *desc = tp->RxDescArray + entry;
1da177e4
LT
5721 u32 status;
5722
5723 rmb();
e03f33af 5724 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
1da177e4
LT
5725
5726 if (status & DescOwn)
5727 break;
4dcb7d33 5728 if (unlikely(status & RxRES)) {
bf82c189
JP
5729 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
5730 status);
cebf8cc7 5731 dev->stats.rx_errors++;
1da177e4 5732 if (status & (RxRWT | RxRUNT))
cebf8cc7 5733 dev->stats.rx_length_errors++;
1da177e4 5734 if (status & RxCRC)
cebf8cc7 5735 dev->stats.rx_crc_errors++;
9dccf611 5736 if (status & RxFOVF) {
4422bcd4 5737 rtl8169_schedule_work(dev);
cebf8cc7 5738 dev->stats.rx_fifo_errors++;
9dccf611 5739 }
6f0333b8 5740 rtl8169_mark_to_asic(desc, rx_buf_sz);
1da177e4 5741 } else {
6f0333b8 5742 struct sk_buff *skb;
b449655f 5743 dma_addr_t addr = le64_to_cpu(desc->addr);
deb9d93c 5744 int pkt_size = (status & 0x00003fff) - 4;
1da177e4 5745
126fa4b9
FR
5746 /*
5747 * The driver does not support incoming fragmented
 5748 * frames. They are seen as a symptom of
 5749 * over-MTU-sized frames.
5750 */
5751 if (unlikely(rtl8169_fragmented_frame(status))) {
cebf8cc7
FR
5752 dev->stats.rx_dropped++;
5753 dev->stats.rx_length_errors++;
6f0333b8 5754 rtl8169_mark_to_asic(desc, rx_buf_sz);
4dcb7d33 5755 continue;
126fa4b9
FR
5756 }
5757
6f0333b8
ED
5758 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
5759 tp, pkt_size, addr);
5760 rtl8169_mark_to_asic(desc, rx_buf_sz);
5761 if (!skb) {
5762 dev->stats.rx_dropped++;
5763 continue;
1da177e4
LT
5764 }
5765
adea1ac7 5766 rtl8169_rx_csum(skb, status);
1da177e4
LT
5767 skb_put(skb, pkt_size);
5768 skb->protocol = eth_type_trans(skb, dev);
5769
7a8fc77b
FR
5770 rtl8169_rx_vlan_tag(desc, skb);
5771
56de414c 5772 napi_gro_receive(&tp->napi, skb);
1da177e4 5773
cebf8cc7
FR
5774 dev->stats.rx_bytes += pkt_size;
5775 dev->stats.rx_packets++;
1da177e4 5776 }
6dccd16b
FR
5777
 5778 /* Workaround for AMD platforms. */
95e0918d 5779 if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
6dccd16b
FR
5780 (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
5781 desc->opts2 = 0;
5782 cur_rx++;
5783 }
1da177e4
LT
5784 }
5785
5786 count = cur_rx - tp->cur_rx;
5787 tp->cur_rx = cur_rx;
5788
6f0333b8 5789 tp->dirty_rx += count;
1da177e4
LT
5790
5791 return count;
5792}
5793
07d3f51f 5794static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
1da177e4 5795{
07d3f51f 5796 struct net_device *dev = dev_instance;
1da177e4 5797 struct rtl8169_private *tp = netdev_priv(dev);
1da177e4 5798 void __iomem *ioaddr = tp->mmio_addr;
1da177e4 5799 int handled = 0;
9085cdfa 5800 u16 status;
1da177e4 5801
f11a377b
DD
 5802 /* Loop handling interrupts until we have no new ones or
 5803 * we hit an invalid/hotplug case.
5804 */
9085cdfa 5805 status = rtl_get_events(tp);
f11a377b 5806 while (status && status != 0xffff) {
811fd301 5807 status &= tp->intr_event;
5808 if (!status)
5809 break;
5810
f11a377b 5811 handled = 1;
1da177e4 5812
f11a377b
DD
5813 /* Handle all of the error cases first. These will reset
5814 * the chip, so just exit the loop.
5815 */
5816 if (unlikely(!netif_running(dev))) {
92fc43b4 5817 rtl8169_hw_reset(tp);
f11a377b
DD
5818 break;
5819 }
1da177e4 5820
1519e57f
FR
5821 if (unlikely(status & RxFIFOOver)) {
5822 switch (tp->mac_version) {
 5823 /* Workaround for Rx FIFO overflow */
5824 case RTL_GIGA_MAC_VER_11:
1519e57f
FR
5825 netif_stop_queue(dev);
5826 rtl8169_tx_timeout(dev);
5827 goto done;
1519e57f
FR
5828 default:
5829 break;
5830 }
f11a377b 5831 }
1da177e4 5832
f11a377b
DD
5833 if (unlikely(status & SYSErr)) {
5834 rtl8169_pcierr_interrupt(dev);
5835 break;
5836 }
1da177e4 5837
f11a377b 5838 if (status & LinkChg)
e4fbce74 5839 __rtl8169_check_link_status(dev, tp, ioaddr, true);
0e485150 5840
f11a377b
DD
 5841 /* We need to see the latest version of tp->intr_mask to
5842 * avoid ignoring an MSI interrupt and having to wait for
5843 * another event which may never come.
5844 */
5845 smp_rmb();
5846 if (status & tp->intr_mask & tp->napi_event) {
5847 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
5848 tp->intr_mask = ~tp->napi_event;
5849
5850 if (likely(napi_schedule_prep(&tp->napi)))
5851 __napi_schedule(&tp->napi);
bf82c189
JP
5852 else
5853 netif_info(tp, intr, dev,
5854 "interrupt %04x in poll\n", status);
f11a377b 5855 }
1da177e4 5856
f11a377b
DD
5857 /* We only get a new MSI interrupt when all active irq
5858 * sources on the chip have been acknowledged. So, ack
5859 * everything we've seen and check if new sources have become
5860 * active to avoid blocking all interrupts from the chip.
5861 */
5862 RTL_W16(IntrStatus,
5863 (status & RxFIFOOver) ? (status | RxOverflow) : status);
9085cdfa 5864 status = rtl_get_events(tp);
865c652d 5865 }
1519e57f 5866done:
1da177e4
LT
5867 return IRQ_RETVAL(handled);
5868}
5869
4422bcd4
FR
5870static void rtl_task(struct work_struct *work)
5871{
5872 struct rtl8169_private *tp =
5873 container_of(work, struct rtl8169_private, wk.work);
5874
5875 rtl_reset_work(tp);
5876}
5877
bea3348e 5878static int rtl8169_poll(struct napi_struct *napi, int budget)
1da177e4 5879{
bea3348e
SH
5880 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
5881 struct net_device *dev = tp->dev;
1da177e4 5882 void __iomem *ioaddr = tp->mmio_addr;
bea3348e 5883 int work_done;
1da177e4 5884
bea3348e 5885 work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
1da177e4
LT
5886 rtl8169_tx_interrupt(dev, tp, ioaddr);
5887
bea3348e 5888 if (work_done < budget) {
288379f0 5889 napi_complete(napi);
f11a377b
DD
5890
 5891 /* We need to force the visibility of tp->intr_mask
 5892 * for other CPUs, as we can lose an MSI interrupt
 5893 * and potentially wait for a retransmit timeout if we don't.
 5894 * The posted write to IntrMask is safe, as it will
 5895 * eventually make it to the chip and we won't lose anything
5896 * until it does.
1da177e4 5897 */
f11a377b 5898 tp->intr_mask = 0xffff;
4c020a96 5899 wmb();
0e485150 5900 RTL_W16(IntrMask, tp->intr_event);
1da177e4
LT
5901 }
5902
bea3348e 5903 return work_done;
1da177e4 5904}
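
/*
 * Illustrative sketch (not part of the driver): the re-arm ordering relied
 * upon at the end of rtl8169_poll() above.  tp->intr_mask must be visible
 * before the (posted) IntrMask write, so the hard irq handler - which does
 * smp_rmb() before testing tp->intr_mask - never sees the events unmasked
 * while still believing NAPI owns them.  The helper name is invented for
 * this sketch only.
 */
static inline void sketch_napi_rearm(struct rtl8169_private *tp,
				     void __iomem *ioaddr)
{
	tp->intr_mask = 0xffff;
	wmb();		/* pairs with smp_rmb() in rtl8169_interrupt() */
	RTL_W16(IntrMask, tp->intr_event);
}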
1da177e4 5905
523a6094
FR
5906static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
5907{
5908 struct rtl8169_private *tp = netdev_priv(dev);
5909
5910 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
5911 return;
5912
5913 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
5914 RTL_W32(RxMissed, 0);
5915}
5916
1da177e4
LT
5917static void rtl8169_down(struct net_device *dev)
5918{
5919 struct rtl8169_private *tp = netdev_priv(dev);
5920 void __iomem *ioaddr = tp->mmio_addr;
1da177e4 5921
4876cc1e 5922 del_timer_sync(&tp->timer);
1da177e4
LT
5923
5924 netif_stop_queue(dev);
5925
93dd79e8 5926 napi_disable(&tp->napi);
93dd79e8 5927
1da177e4
LT
5928 spin_lock_irq(&tp->lock);
5929
92fc43b4 5930 rtl8169_hw_reset(tp);
323bb685
SG
5931 /*
 5932 * At this point device interrupts cannot be enabled in any function,
209e5ac8
FR
5933 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
5934 * and napi is disabled (rtl8169_poll).
323bb685 5935 */
523a6094 5936 rtl8169_rx_missed(dev, ioaddr);
1da177e4
LT
5937
5938 spin_unlock_irq(&tp->lock);
5939
5940 synchronize_irq(dev->irq);
5941
1da177e4 5942 /* Give a racing hard_start_xmit a few cycles to complete. */
fbd568a3 5943 synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
1da177e4 5944
1da177e4
LT
5945 rtl8169_tx_clear(tp);
5946
5947 rtl8169_rx_clear(tp);
065c27c1 5948
5949 rtl_pll_power_down(tp);
1da177e4
LT
5950}
5951
5952static int rtl8169_close(struct net_device *dev)
5953{
5954 struct rtl8169_private *tp = netdev_priv(dev);
5955 struct pci_dev *pdev = tp->pci_dev;
5956
e1759441
RW
5957 pm_runtime_get_sync(&pdev->dev);
5958
cecb5fd7 5959 /* Update counters before going down */
355423d0
IV
5960 rtl8169_update_counters(dev);
5961
1da177e4
LT
5962 rtl8169_down(dev);
5963
5964 free_irq(dev->irq, dev);
5965
82553bb6
SG
5966 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
5967 tp->RxPhyAddr);
5968 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
5969 tp->TxPhyAddr);
1da177e4
LT
5970 tp->TxDescArray = NULL;
5971 tp->RxDescArray = NULL;
5972
e1759441
RW
5973 pm_runtime_put_sync(&pdev->dev);
5974
1da177e4
LT
5975 return 0;
5976}
5977
07ce4064 5978static void rtl_set_rx_mode(struct net_device *dev)
1da177e4
LT
5979{
5980 struct rtl8169_private *tp = netdev_priv(dev);
5981 void __iomem *ioaddr = tp->mmio_addr;
5982 unsigned long flags;
5983 u32 mc_filter[2]; /* Multicast hash filter */
07d3f51f 5984 int rx_mode;
1da177e4
LT
5985 u32 tmp = 0;
5986
5987 if (dev->flags & IFF_PROMISC) {
5988 /* Unconditionally log net taps. */
bf82c189 5989 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
1da177e4
LT
5990 rx_mode =
5991 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
5992 AcceptAllPhys;
5993 mc_filter[1] = mc_filter[0] = 0xffffffff;
4cd24eaf 5994 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
8e95a202 5995 (dev->flags & IFF_ALLMULTI)) {
1da177e4
LT
5996 /* Too many to filter perfectly -- accept all multicasts. */
5997 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
5998 mc_filter[1] = mc_filter[0] = 0xffffffff;
5999 } else {
22bedad3 6000 struct netdev_hw_addr *ha;
07d3f51f 6001
1da177e4
LT
6002 rx_mode = AcceptBroadcast | AcceptMyPhys;
6003 mc_filter[1] = mc_filter[0] = 0;
22bedad3
JP
6004 netdev_for_each_mc_addr(ha, dev) {
6005 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1da177e4
LT
6006 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
6007 rx_mode |= AcceptMulticast;
6008 }
6009 }
6010
6011 spin_lock_irqsave(&tp->lock, flags);
6012
1687b566 6013 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
1da177e4 6014
f887cce8 6015 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
1087f4f4
FR
6016 u32 data = mc_filter[0];
6017
6018 mc_filter[0] = swab32(mc_filter[1]);
6019 mc_filter[1] = swab32(data);
bcf0bf90
FR
6020 }
6021
1da177e4 6022 RTL_W32(MAR0 + 4, mc_filter[1]);
78f1cd02 6023 RTL_W32(MAR0 + 0, mc_filter[0]);
1da177e4 6024
57a9f236
FR
6025 RTL_W32(RxConfig, tmp);
6026
1da177e4
LT
6027 spin_unlock_irqrestore(&tp->lock, flags);
6028}
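
/*
 * Illustrative sketch (not part of the driver): how a multicast address
 * selects one bit of the 64-entry MAR hash built above - the top six bits
 * of the Ethernet CRC index a single filter bit, split across the two
 * 32-bit MAR registers (and byte-swapped on the newer chips).  The helper
 * name is invented for this sketch only.
 */
static inline void sketch_mc_hash_set(u32 mc_filter[2], const u8 *addr)
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}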
6029
6030/**
6031 * rtl8169_get_stats - Get rtl8169 read/write statistics
6032 * @dev: The Ethernet Device to get statistics for
6033 *
6034 * Get TX/RX statistics for rtl8169
6035 */
6036static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
6037{
6038 struct rtl8169_private *tp = netdev_priv(dev);
6039 void __iomem *ioaddr = tp->mmio_addr;
6040 unsigned long flags;
6041
6042 if (netif_running(dev)) {
6043 spin_lock_irqsave(&tp->lock, flags);
523a6094 6044 rtl8169_rx_missed(dev, ioaddr);
1da177e4
LT
6045 spin_unlock_irqrestore(&tp->lock, flags);
6046 }
5b0384f4 6047
cebf8cc7 6048 return &dev->stats;
1da177e4
LT
6049}
6050
861ab440 6051static void rtl8169_net_suspend(struct net_device *dev)
5d06a99f 6052{
065c27c1 6053 struct rtl8169_private *tp = netdev_priv(dev);
6054
5d06a99f 6055 if (!netif_running(dev))
861ab440 6056 return;
5d06a99f 6057
065c27c1 6058 rtl_pll_power_down(tp);
6059
5d06a99f
FR
6060 netif_device_detach(dev);
6061 netif_stop_queue(dev);
861ab440
RW
6062}
6063
6064#ifdef CONFIG_PM
6065
6066static int rtl8169_suspend(struct device *device)
6067{
6068 struct pci_dev *pdev = to_pci_dev(device);
6069 struct net_device *dev = pci_get_drvdata(pdev);
5d06a99f 6070
861ab440 6071 rtl8169_net_suspend(dev);
1371fa6d 6072
5d06a99f
FR
6073 return 0;
6074}
6075
e1759441
RW
6076static void __rtl8169_resume(struct net_device *dev)
6077{
065c27c1 6078 struct rtl8169_private *tp = netdev_priv(dev);
6079
e1759441 6080 netif_device_attach(dev);
065c27c1 6081
6082 rtl_pll_power_up(tp);
6083
4422bcd4 6084 rtl8169_schedule_work(dev);
e1759441
RW
6085}
6086
861ab440 6087static int rtl8169_resume(struct device *device)
5d06a99f 6088{
861ab440 6089 struct pci_dev *pdev = to_pci_dev(device);
5d06a99f 6090 struct net_device *dev = pci_get_drvdata(pdev);
fccec10b
SG
6091 struct rtl8169_private *tp = netdev_priv(dev);
6092
6093 rtl8169_init_phy(dev, tp);
5d06a99f 6094
e1759441
RW
6095 if (netif_running(dev))
6096 __rtl8169_resume(dev);
5d06a99f 6097
e1759441
RW
6098 return 0;
6099}
6100
6101static int rtl8169_runtime_suspend(struct device *device)
6102{
6103 struct pci_dev *pdev = to_pci_dev(device);
6104 struct net_device *dev = pci_get_drvdata(pdev);
6105 struct rtl8169_private *tp = netdev_priv(dev);
6106
6107 if (!tp->TxDescArray)
6108 return 0;
6109
6110 spin_lock_irq(&tp->lock);
6111 tp->saved_wolopts = __rtl8169_get_wol(tp);
6112 __rtl8169_set_wol(tp, WAKE_ANY);
6113 spin_unlock_irq(&tp->lock);
6114
6115 rtl8169_net_suspend(dev);
6116
6117 return 0;
6118}
6119
6120static int rtl8169_runtime_resume(struct device *device)
6121{
6122 struct pci_dev *pdev = to_pci_dev(device);
6123 struct net_device *dev = pci_get_drvdata(pdev);
6124 struct rtl8169_private *tp = netdev_priv(dev);
6125
6126 if (!tp->TxDescArray)
6127 return 0;
6128
6129 spin_lock_irq(&tp->lock);
6130 __rtl8169_set_wol(tp, tp->saved_wolopts);
6131 tp->saved_wolopts = 0;
6132 spin_unlock_irq(&tp->lock);
6133
fccec10b
SG
6134 rtl8169_init_phy(dev, tp);
6135
e1759441 6136 __rtl8169_resume(dev);
5d06a99f 6137
5d06a99f
FR
6138 return 0;
6139}
6140
e1759441
RW
6141static int rtl8169_runtime_idle(struct device *device)
6142{
6143 struct pci_dev *pdev = to_pci_dev(device);
6144 struct net_device *dev = pci_get_drvdata(pdev);
6145 struct rtl8169_private *tp = netdev_priv(dev);
6146
e4fbce74 6147 return tp->TxDescArray ? -EBUSY : 0;
e1759441
RW
6148}
6149
47145210 6150static const struct dev_pm_ops rtl8169_pm_ops = {
cecb5fd7
FR
6151 .suspend = rtl8169_suspend,
6152 .resume = rtl8169_resume,
6153 .freeze = rtl8169_suspend,
6154 .thaw = rtl8169_resume,
6155 .poweroff = rtl8169_suspend,
6156 .restore = rtl8169_resume,
6157 .runtime_suspend = rtl8169_runtime_suspend,
6158 .runtime_resume = rtl8169_runtime_resume,
6159 .runtime_idle = rtl8169_runtime_idle,
861ab440
RW
6160};
6161
6162#define RTL8169_PM_OPS (&rtl8169_pm_ops)
6163
6164#else /* !CONFIG_PM */
6165
6166#define RTL8169_PM_OPS NULL
6167
6168#endif /* !CONFIG_PM */
6169
649b3b8c 6170static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6171{
6172 void __iomem *ioaddr = tp->mmio_addr;
6173
6174 /* WoL fails with 8168b when the receiver is disabled. */
6175 switch (tp->mac_version) {
6176 case RTL_GIGA_MAC_VER_11:
6177 case RTL_GIGA_MAC_VER_12:
6178 case RTL_GIGA_MAC_VER_17:
6179 pci_clear_master(tp->pci_dev);
6180
6181 RTL_W8(ChipCmd, CmdRxEnb);
6182 /* PCI commit */
6183 RTL_R8(ChipCmd);
6184 break;
6185 default:
6186 break;
6187 }
6188}
6189
1765f95d
FR
6190static void rtl_shutdown(struct pci_dev *pdev)
6191{
861ab440 6192 struct net_device *dev = pci_get_drvdata(pdev);
4bb3f522 6193 struct rtl8169_private *tp = netdev_priv(dev);
861ab440
RW
6194
6195 rtl8169_net_suspend(dev);
1765f95d 6196
cecb5fd7 6197 /* Restore original MAC address */
cc098dc7
IV
6198 rtl_rar_set(tp, dev->perm_addr);
6199
4bb3f522 6200 spin_lock_irq(&tp->lock);
6201
92fc43b4 6202 rtl8169_hw_reset(tp);
4bb3f522 6203
6204 spin_unlock_irq(&tp->lock);
6205
861ab440 6206 if (system_state == SYSTEM_POWER_OFF) {
649b3b8c 6207 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6208 rtl_wol_suspend_quirk(tp);
6209 rtl_wol_shutdown_quirk(tp);
ca52efd5 6210 }
6211
861ab440
RW
6212 pci_wake_from_d3(pdev, true);
6213 pci_set_power_state(pdev, PCI_D3hot);
6214 }
6215}
5d06a99f 6216
1da177e4
LT
6217static struct pci_driver rtl8169_pci_driver = {
6218 .name = MODULENAME,
6219 .id_table = rtl8169_pci_tbl,
6220 .probe = rtl8169_init_one,
6221 .remove = __devexit_p(rtl8169_remove_one),
1765f95d 6222 .shutdown = rtl_shutdown,
861ab440 6223 .driver.pm = RTL8169_PM_OPS,
1da177e4
LT
6224};
6225
07d3f51f 6226static int __init rtl8169_init_module(void)
1da177e4 6227{
29917620 6228 return pci_register_driver(&rtl8169_pci_driver);
1da177e4
LT
6229}
6230
07d3f51f 6231static void __exit rtl8169_cleanup_module(void)
1da177e4
LT
6232{
6233 pci_unregister_driver(&rtl8169_pci_driver);
6234}
6235
6236module_init(rtl8169_init_module);
6237module_exit(rtl8169_cleanup_module);