d9bae307c14458e34ca654c9b40b0861789b22ad
[deliverable/linux.git] / drivers / net / ethernet / realtek / r8169.c
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47
#ifdef RTL8169_DEBUG
/*
 * Debug-build runtime assertion.  Wrapped in do { } while (0) so that
 * "if (x) assert(y); else ..." parses as intended: the original bare
 * if-statement form silently captured a following "else".
 */
#define assert(expr) \
do { \
	if (!(expr)) { \
		printk("Assertion failed! %s,%s,%s,line=%d\n", \
		       #expr, __FILE__, __func__, __LINE__); \
	} \
} while (0)
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */
60
61 #define R8169_MSG_DEFAULT \
62 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
63
64 #define TX_BUFFS_AVAIL(tp) \
65 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
66
67 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
68 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
69 static const int multicast_filter_limit = 32;
70
71 #define MAX_READ_REQUEST_SHIFT 12
72 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
73 #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
74 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
75
76 #define R8169_REGS_SIZE 256
77 #define R8169_NAPI_WEIGHT 64
78 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
79 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
80 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
81 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
82 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
83
84 #define RTL8169_TX_TIMEOUT (6*HZ)
85 #define RTL8169_PHY_TIMEOUT (10*HZ)
86
87 #define RTL_EEPROM_SIG cpu_to_le32(0x8129)
88 #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
89 #define RTL_EEPROM_SIG_ADDR 0x0000
90
91 /* write/read MMIO register */
92 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
93 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
94 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
95 #define RTL_R8(reg) readb (ioaddr + (reg))
96 #define RTL_R16(reg) readw (ioaddr + (reg))
97 #define RTL_R32(reg) readl (ioaddr + (reg))
98
99 enum mac_version {
100 RTL_GIGA_MAC_VER_01 = 0,
101 RTL_GIGA_MAC_VER_02,
102 RTL_GIGA_MAC_VER_03,
103 RTL_GIGA_MAC_VER_04,
104 RTL_GIGA_MAC_VER_05,
105 RTL_GIGA_MAC_VER_06,
106 RTL_GIGA_MAC_VER_07,
107 RTL_GIGA_MAC_VER_08,
108 RTL_GIGA_MAC_VER_09,
109 RTL_GIGA_MAC_VER_10,
110 RTL_GIGA_MAC_VER_11,
111 RTL_GIGA_MAC_VER_12,
112 RTL_GIGA_MAC_VER_13,
113 RTL_GIGA_MAC_VER_14,
114 RTL_GIGA_MAC_VER_15,
115 RTL_GIGA_MAC_VER_16,
116 RTL_GIGA_MAC_VER_17,
117 RTL_GIGA_MAC_VER_18,
118 RTL_GIGA_MAC_VER_19,
119 RTL_GIGA_MAC_VER_20,
120 RTL_GIGA_MAC_VER_21,
121 RTL_GIGA_MAC_VER_22,
122 RTL_GIGA_MAC_VER_23,
123 RTL_GIGA_MAC_VER_24,
124 RTL_GIGA_MAC_VER_25,
125 RTL_GIGA_MAC_VER_26,
126 RTL_GIGA_MAC_VER_27,
127 RTL_GIGA_MAC_VER_28,
128 RTL_GIGA_MAC_VER_29,
129 RTL_GIGA_MAC_VER_30,
130 RTL_GIGA_MAC_VER_31,
131 RTL_GIGA_MAC_VER_32,
132 RTL_GIGA_MAC_VER_33,
133 RTL_GIGA_MAC_VER_34,
134 RTL_GIGA_MAC_VER_35,
135 RTL_GIGA_MAC_VER_36,
136 RTL_GIGA_MAC_NONE = 0xff,
137 };
138
139 enum rtl_tx_desc_version {
140 RTL_TD_0 = 0,
141 RTL_TD_1 = 1,
142 };
143
144 #define JUMBO_1K ETH_DATA_LEN
145 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
146 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
147 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
148 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
149
150 #define _R(NAME,TD,FW,SZ,B) { \
151 .name = NAME, \
152 .txd_version = TD, \
153 .fw_name = FW, \
154 .jumbo_max = SZ, \
155 .jumbo_tx_csum = B \
156 }
157
158 static const struct {
159 const char *name;
160 enum rtl_tx_desc_version txd_version;
161 const char *fw_name;
162 u16 jumbo_max;
163 bool jumbo_tx_csum;
164 } rtl_chip_infos[] = {
165 /* PCI devices. */
166 [RTL_GIGA_MAC_VER_01] =
167 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
168 [RTL_GIGA_MAC_VER_02] =
169 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
170 [RTL_GIGA_MAC_VER_03] =
171 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
172 [RTL_GIGA_MAC_VER_04] =
173 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
174 [RTL_GIGA_MAC_VER_05] =
175 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
176 [RTL_GIGA_MAC_VER_06] =
177 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
178 /* PCI-E devices. */
179 [RTL_GIGA_MAC_VER_07] =
180 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
181 [RTL_GIGA_MAC_VER_08] =
182 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
183 [RTL_GIGA_MAC_VER_09] =
184 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
185 [RTL_GIGA_MAC_VER_10] =
186 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
187 [RTL_GIGA_MAC_VER_11] =
188 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
189 [RTL_GIGA_MAC_VER_12] =
190 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
191 [RTL_GIGA_MAC_VER_13] =
192 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
193 [RTL_GIGA_MAC_VER_14] =
194 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
195 [RTL_GIGA_MAC_VER_15] =
196 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
197 [RTL_GIGA_MAC_VER_16] =
198 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
199 [RTL_GIGA_MAC_VER_17] =
200 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
201 [RTL_GIGA_MAC_VER_18] =
202 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
203 [RTL_GIGA_MAC_VER_19] =
204 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
205 [RTL_GIGA_MAC_VER_20] =
206 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
207 [RTL_GIGA_MAC_VER_21] =
208 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
209 [RTL_GIGA_MAC_VER_22] =
210 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
211 [RTL_GIGA_MAC_VER_23] =
212 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
213 [RTL_GIGA_MAC_VER_24] =
214 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
215 [RTL_GIGA_MAC_VER_25] =
216 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
217 JUMBO_9K, false),
218 [RTL_GIGA_MAC_VER_26] =
219 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
220 JUMBO_9K, false),
221 [RTL_GIGA_MAC_VER_27] =
222 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
223 [RTL_GIGA_MAC_VER_28] =
224 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
225 [RTL_GIGA_MAC_VER_29] =
226 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
227 JUMBO_1K, true),
228 [RTL_GIGA_MAC_VER_30] =
229 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
230 JUMBO_1K, true),
231 [RTL_GIGA_MAC_VER_31] =
232 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
233 [RTL_GIGA_MAC_VER_32] =
234 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
235 JUMBO_9K, false),
236 [RTL_GIGA_MAC_VER_33] =
237 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
238 JUMBO_9K, false),
239 [RTL_GIGA_MAC_VER_34] =
240 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
241 JUMBO_9K, false),
242 [RTL_GIGA_MAC_VER_35] =
243 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
244 JUMBO_9K, false),
245 [RTL_GIGA_MAC_VER_36] =
246 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
247 JUMBO_9K, false),
248 };
249 #undef _R
250
251 enum cfg_version {
252 RTL_CFG_0 = 0x00,
253 RTL_CFG_1,
254 RTL_CFG_2
255 };
256
257 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
258 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
259 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
260 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
261 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
262 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
263 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
264 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
265 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
266 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
267 { PCI_VENDOR_ID_LINKSYS, 0x1032,
268 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
269 { 0x0001, 0x8168,
270 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
271 {0,},
272 };
273
274 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
275
276 static int rx_buf_sz = 16383;
277 static int use_dac;
278 static struct {
279 u32 msg_enable;
280 } debug = { -1 };
281
282 enum rtl_registers {
283 MAC0 = 0, /* Ethernet hardware address. */
284 MAC4 = 4,
285 MAR0 = 8, /* Multicast filter. */
286 CounterAddrLow = 0x10,
287 CounterAddrHigh = 0x14,
288 TxDescStartAddrLow = 0x20,
289 TxDescStartAddrHigh = 0x24,
290 TxHDescStartAddrLow = 0x28,
291 TxHDescStartAddrHigh = 0x2c,
292 FLASH = 0x30,
293 ERSR = 0x36,
294 ChipCmd = 0x37,
295 TxPoll = 0x38,
296 IntrMask = 0x3c,
297 IntrStatus = 0x3e,
298
299 TxConfig = 0x40,
300 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
301 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
302
303 RxConfig = 0x44,
304 #define RX128_INT_EN (1 << 15) /* 8111c and later */
305 #define RX_MULTI_EN (1 << 14) /* 8111c only */
306 #define RXCFG_FIFO_SHIFT 13
307 /* No threshold before first PCI xfer */
308 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
309 #define RXCFG_DMA_SHIFT 8
310 /* Unlimited maximum PCI burst. */
311 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
312
313 RxMissed = 0x4c,
314 Cfg9346 = 0x50,
315 Config0 = 0x51,
316 Config1 = 0x52,
317 Config2 = 0x53,
318 #define PME_SIGNAL (1 << 5) /* 8168c and later */
319
320 Config3 = 0x54,
321 Config4 = 0x55,
322 Config5 = 0x56,
323 MultiIntr = 0x5c,
324 PHYAR = 0x60,
325 PHYstatus = 0x6c,
326 RxMaxSize = 0xda,
327 CPlusCmd = 0xe0,
328 IntrMitigate = 0xe2,
329 RxDescAddrLow = 0xe4,
330 RxDescAddrHigh = 0xe8,
331 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
332
333 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
334
335 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
336
337 #define TxPacketMax (8064 >> 7)
338 #define EarlySize 0x27
339
340 FuncEvent = 0xf0,
341 FuncEventMask = 0xf4,
342 FuncPresetState = 0xf8,
343 FuncForceEvent = 0xfc,
344 };
345
346 enum rtl8110_registers {
347 TBICSR = 0x64,
348 TBI_ANAR = 0x68,
349 TBI_LPAR = 0x6a,
350 };
351
352 enum rtl8168_8101_registers {
353 CSIDR = 0x64,
354 CSIAR = 0x68,
355 #define CSIAR_FLAG 0x80000000
356 #define CSIAR_WRITE_CMD 0x80000000
357 #define CSIAR_BYTE_ENABLE 0x0f
358 #define CSIAR_BYTE_ENABLE_SHIFT 12
359 #define CSIAR_ADDR_MASK 0x0fff
360 PMCH = 0x6f,
361 EPHYAR = 0x80,
362 #define EPHYAR_FLAG 0x80000000
363 #define EPHYAR_WRITE_CMD 0x80000000
364 #define EPHYAR_REG_MASK 0x1f
365 #define EPHYAR_REG_SHIFT 16
366 #define EPHYAR_DATA_MASK 0xffff
367 DLLPR = 0xd0,
368 #define PFM_EN (1 << 6)
369 DBG_REG = 0xd1,
370 #define FIX_NAK_1 (1 << 4)
371 #define FIX_NAK_2 (1 << 3)
372 TWSI = 0xd2,
373 MCU = 0xd3,
374 #define NOW_IS_OOB (1 << 7)
375 #define EN_NDP (1 << 3)
376 #define EN_OOB_RESET (1 << 2)
377 EFUSEAR = 0xdc,
378 #define EFUSEAR_FLAG 0x80000000
379 #define EFUSEAR_WRITE_CMD 0x80000000
380 #define EFUSEAR_READ_CMD 0x00000000
381 #define EFUSEAR_REG_MASK 0x03ff
382 #define EFUSEAR_REG_SHIFT 8
383 #define EFUSEAR_DATA_MASK 0xff
384 };
385
386 enum rtl8168_registers {
387 LED_FREQ = 0x1a,
388 EEE_LED = 0x1b,
389 ERIDR = 0x70,
390 ERIAR = 0x74,
391 #define ERIAR_FLAG 0x80000000
392 #define ERIAR_WRITE_CMD 0x80000000
393 #define ERIAR_READ_CMD 0x00000000
394 #define ERIAR_ADDR_BYTE_ALIGN 4
395 #define ERIAR_TYPE_SHIFT 16
396 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
397 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
398 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
399 #define ERIAR_MASK_SHIFT 12
400 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
401 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
402 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
403 EPHY_RXER_NUM = 0x7c,
404 OCPDR = 0xb0, /* OCP GPHY access */
405 #define OCPDR_WRITE_CMD 0x80000000
406 #define OCPDR_READ_CMD 0x00000000
407 #define OCPDR_REG_MASK 0x7f
408 #define OCPDR_GPHY_REG_SHIFT 16
409 #define OCPDR_DATA_MASK 0xffff
410 OCPAR = 0xb4,
411 #define OCPAR_FLAG 0x80000000
412 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
413 #define OCPAR_GPHY_READ_CMD 0x0000f060
414 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
415 MISC = 0xf0, /* 8168e only. */
416 #define TXPLA_RST (1 << 29)
417 #define PWM_EN (1 << 22)
418 };
419
420 enum rtl_register_content {
421 /* InterruptStatusBits */
422 SYSErr = 0x8000,
423 PCSTimeout = 0x4000,
424 SWInt = 0x0100,
425 TxDescUnavail = 0x0080,
426 RxFIFOOver = 0x0040,
427 LinkChg = 0x0020,
428 RxOverflow = 0x0010,
429 TxErr = 0x0008,
430 TxOK = 0x0004,
431 RxErr = 0x0002,
432 RxOK = 0x0001,
433
434 /* RxStatusDesc */
435 RxBOVF = (1 << 24),
436 RxFOVF = (1 << 23),
437 RxRWT = (1 << 22),
438 RxRES = (1 << 21),
439 RxRUNT = (1 << 20),
440 RxCRC = (1 << 19),
441
442 /* ChipCmdBits */
443 StopReq = 0x80,
444 CmdReset = 0x10,
445 CmdRxEnb = 0x08,
446 CmdTxEnb = 0x04,
447 RxBufEmpty = 0x01,
448
449 /* TXPoll register p.5 */
450 HPQ = 0x80, /* Poll cmd on the high prio queue */
451 NPQ = 0x40, /* Poll cmd on the low prio queue */
452 FSWInt = 0x01, /* Forced software interrupt */
453
454 /* Cfg9346Bits */
455 Cfg9346_Lock = 0x00,
456 Cfg9346_Unlock = 0xc0,
457
458 /* rx_mode_bits */
459 AcceptErr = 0x20,
460 AcceptRunt = 0x10,
461 AcceptBroadcast = 0x08,
462 AcceptMulticast = 0x04,
463 AcceptMyPhys = 0x02,
464 AcceptAllPhys = 0x01,
465 #define RX_CONFIG_ACCEPT_MASK 0x3f
466
467 /* TxConfigBits */
468 TxInterFrameGapShift = 24,
469 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
470
471 /* Config1 register p.24 */
472 LEDS1 = (1 << 7),
473 LEDS0 = (1 << 6),
474 Speed_down = (1 << 4),
475 MEMMAP = (1 << 3),
476 IOMAP = (1 << 2),
477 VPD = (1 << 1),
478 PMEnable = (1 << 0), /* Power Management Enable */
479
480 /* Config2 register p. 25 */
481 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
482 PCI_Clock_66MHz = 0x01,
483 PCI_Clock_33MHz = 0x00,
484
485 /* Config3 register p.25 */
486 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
487 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
488 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
489 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
490
491 /* Config4 register */
492 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
493
494 /* Config5 register p.27 */
495 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
496 MWF = (1 << 5), /* Accept Multicast wakeup frame */
497 UWF = (1 << 4), /* Accept Unicast wakeup frame */
498 Spi_en = (1 << 3),
499 LanWake = (1 << 1), /* LanWake enable/disable */
500 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
501
502 /* TBICSR p.28 */
503 TBIReset = 0x80000000,
504 TBILoopback = 0x40000000,
505 TBINwEnable = 0x20000000,
506 TBINwRestart = 0x10000000,
507 TBILinkOk = 0x02000000,
508 TBINwComplete = 0x01000000,
509
510 /* CPlusCmd p.31 */
511 EnableBist = (1 << 15), // 8168 8101
512 Mac_dbgo_oe = (1 << 14), // 8168 8101
513 Normal_mode = (1 << 13), // unused
514 Force_half_dup = (1 << 12), // 8168 8101
515 Force_rxflow_en = (1 << 11), // 8168 8101
516 Force_txflow_en = (1 << 10), // 8168 8101
517 Cxpl_dbg_sel = (1 << 9), // 8168 8101
518 ASF = (1 << 8), // 8168 8101
519 PktCntrDisable = (1 << 7), // 8168 8101
520 Mac_dbgo_sel = 0x001c, // 8168
521 RxVlan = (1 << 6),
522 RxChkSum = (1 << 5),
523 PCIDAC = (1 << 4),
524 PCIMulRW = (1 << 3),
525 INTT_0 = 0x0000, // 8168
526 INTT_1 = 0x0001, // 8168
527 INTT_2 = 0x0002, // 8168
528 INTT_3 = 0x0003, // 8168
529
530 /* rtl8169_PHYstatus */
531 TBI_Enable = 0x80,
532 TxFlowCtrl = 0x40,
533 RxFlowCtrl = 0x20,
534 _1000bpsF = 0x10,
535 _100bps = 0x08,
536 _10bps = 0x04,
537 LinkStatus = 0x02,
538 FullDup = 0x01,
539
540 /* _TBICSRBit */
541 TBILinkOK = 0x02000000,
542
543 /* DumpCounterCommand */
544 CounterDump = 0x8,
545 };
546
547 enum rtl_desc_bit {
548 /* First doubleword. */
549 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
550 RingEnd = (1 << 30), /* End of descriptor ring */
551 FirstFrag = (1 << 29), /* First segment of a packet */
552 LastFrag = (1 << 28), /* Final segment of a packet */
553 };
554
555 /* Generic case. */
556 enum rtl_tx_desc_bit {
557 /* First doubleword. */
558 TD_LSO = (1 << 27), /* Large Send Offload */
559 #define TD_MSS_MAX 0x07ffu /* MSS value */
560
561 /* Second doubleword. */
562 TxVlanTag = (1 << 17), /* Add VLAN tag */
563 };
564
565 /* 8169, 8168b and 810x except 8102e. */
566 enum rtl_tx_desc_bit_0 {
567 /* First doubleword. */
568 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
569 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
570 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
571 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
572 };
573
574 /* 8102e, 8168c and beyond. */
575 enum rtl_tx_desc_bit_1 {
576 /* Second doubleword. */
577 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
578 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
579 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
580 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
581 };
582
583 static const struct rtl_tx_desc_info {
584 struct {
585 u32 udp;
586 u32 tcp;
587 } checksum;
588 u16 mss_shift;
589 u16 opts_offset;
590 } tx_desc_info [] = {
591 [RTL_TD_0] = {
592 .checksum = {
593 .udp = TD0_IP_CS | TD0_UDP_CS,
594 .tcp = TD0_IP_CS | TD0_TCP_CS
595 },
596 .mss_shift = TD0_MSS_SHIFT,
597 .opts_offset = 0
598 },
599 [RTL_TD_1] = {
600 .checksum = {
601 .udp = TD1_IP_CS | TD1_UDP_CS,
602 .tcp = TD1_IP_CS | TD1_TCP_CS
603 },
604 .mss_shift = TD1_MSS_SHIFT,
605 .opts_offset = 1
606 }
607 };
608
609 enum rtl_rx_desc_bit {
610 /* Rx private */
611 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
612 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
613
614 #define RxProtoUDP (PID1)
615 #define RxProtoTCP (PID0)
616 #define RxProtoIP (PID1 | PID0)
617 #define RxProtoMask RxProtoIP
618
619 IPFail = (1 << 16), /* IP checksum failed */
620 UDPFail = (1 << 15), /* UDP/IP checksum failed */
621 TCPFail = (1 << 14), /* TCP/IP checksum failed */
622 RxVlanTag = (1 << 16), /* VLAN tag available */
623 };
624
625 #define RsvdMask 0x3fffc000
626
627 struct TxDesc {
628 __le32 opts1;
629 __le32 opts2;
630 __le64 addr;
631 };
632
633 struct RxDesc {
634 __le32 opts1;
635 __le32 opts2;
636 __le64 addr;
637 };
638
639 struct ring_info {
640 struct sk_buff *skb;
641 u32 len;
642 u8 __pad[sizeof(void *) - sizeof(u32)];
643 };
644
645 enum features {
646 RTL_FEATURE_WOL = (1 << 0),
647 RTL_FEATURE_MSI = (1 << 1),
648 RTL_FEATURE_GMII = (1 << 2),
649 };
650
651 struct rtl8169_counters {
652 __le64 tx_packets;
653 __le64 rx_packets;
654 __le64 tx_errors;
655 __le32 rx_errors;
656 __le16 rx_missed;
657 __le16 align_errors;
658 __le32 tx_one_collision;
659 __le32 tx_multi_collision;
660 __le64 rx_unicast;
661 __le64 rx_broadcast;
662 __le32 rx_multicast;
663 __le16 tx_aborted;
664 __le16 tx_underun;
665 };
666
667 enum rtl_flag {
668 RTL_FLAG_TASK_ENABLED,
669 RTL_FLAG_TASK_SLOW_PENDING,
670 RTL_FLAG_TASK_RESET_PENDING,
671 RTL_FLAG_TASK_PHY_PENDING,
672 RTL_FLAG_MAX
673 };
674
675 struct rtl8169_stats {
676 u64 packets;
677 u64 bytes;
678 struct u64_stats_sync syncp;
679 };
680
681 struct rtl8169_private {
682 void __iomem *mmio_addr; /* memory map physical address */
683 struct pci_dev *pci_dev;
684 struct net_device *dev;
685 struct napi_struct napi;
686 u32 msg_enable;
687 u16 txd_version;
688 u16 mac_version;
689 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
690 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
691 u32 dirty_rx;
692 u32 dirty_tx;
693 struct rtl8169_stats rx_stats;
694 struct rtl8169_stats tx_stats;
695 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
696 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
697 dma_addr_t TxPhyAddr;
698 dma_addr_t RxPhyAddr;
699 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
700 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
701 struct timer_list timer;
702 u16 cp_cmd;
703
704 u16 event_slow;
705
706 struct mdio_ops {
707 void (*write)(void __iomem *, int, int);
708 int (*read)(void __iomem *, int);
709 } mdio_ops;
710
711 struct pll_power_ops {
712 void (*down)(struct rtl8169_private *);
713 void (*up)(struct rtl8169_private *);
714 } pll_power_ops;
715
716 struct jumbo_ops {
717 void (*enable)(struct rtl8169_private *);
718 void (*disable)(struct rtl8169_private *);
719 } jumbo_ops;
720
721 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
722 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
723 void (*phy_reset_enable)(struct rtl8169_private *tp);
724 void (*hw_start)(struct net_device *);
725 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
726 unsigned int (*link_ok)(void __iomem *);
727 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
728
729 struct {
730 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
731 struct mutex mutex;
732 struct work_struct work;
733 } wk;
734
735 unsigned features;
736
737 struct mii_if_info mii;
738 struct rtl8169_counters counters;
739 u32 saved_wolopts;
740 u32 opts1_mask;
741
742 struct rtl_fw {
743 const struct firmware *fw;
744
745 #define RTL_VER_SIZE 32
746
747 char version[RTL_VER_SIZE];
748
749 struct rtl_fw_phy_action {
750 __le32 *code;
751 size_t size;
752 } phy_action;
753 } *rtl_fw;
754 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
755 };
756
757 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
758 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
759 module_param(use_dac, int, 0);
760 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
761 module_param_named(debug, debug.msg_enable, int, 0);
762 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
763 MODULE_LICENSE("GPL");
764 MODULE_VERSION(RTL8169_VERSION);
765 MODULE_FIRMWARE(FIRMWARE_8168D_1);
766 MODULE_FIRMWARE(FIRMWARE_8168D_2);
767 MODULE_FIRMWARE(FIRMWARE_8168E_1);
768 MODULE_FIRMWARE(FIRMWARE_8168E_2);
769 MODULE_FIRMWARE(FIRMWARE_8168E_3);
770 MODULE_FIRMWARE(FIRMWARE_8105E_1);
771 MODULE_FIRMWARE(FIRMWARE_8168F_1);
772 MODULE_FIRMWARE(FIRMWARE_8168F_2);
773
774 static void rtl_lock_work(struct rtl8169_private *tp)
775 {
776 mutex_lock(&tp->wk.mutex);
777 }
778
779 static void rtl_unlock_work(struct rtl8169_private *tp)
780 {
781 mutex_unlock(&tp->wk.mutex);
782 }
783
784 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
785 {
786 int cap = pci_pcie_cap(pdev);
787
788 if (cap) {
789 u16 ctl;
790
791 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
792 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
793 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
794 }
795 }
796
/*
 * Read a dword from the OCP register space (used by the DASH-capable
 * 8168dp parts to talk to the management firmware).
 *
 * @mask: byte-enable nibble, placed in OCPAR bits 15:12.
 * @reg:  OCP register address, OCPAR bits 11:0.
 *
 * Polls up to 20 x 100us for the hardware to raise OCPAR_FLAG.
 * NOTE(review): on timeout this still returns whatever is in OCPDR —
 * callers get stale data rather than an error indication.
 */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		/* Flag set => read data is valid in OCPDR. */
		if (RTL_R32(OCPAR) & OCPAR_FLAG)
			break;
	}
	return RTL_R32(OCPDR);
}
810
/*
 * Write a dword to the OCP register space.  Mirror of ocp_read():
 * data goes to OCPDR first, then the address/byte-enable (with
 * OCPAR_FLAG set to start the cycle) to OCPAR.  Completion is signalled
 * by the hardware clearing OCPAR_FLAG; we poll up to 20 x 100us and
 * give up silently on timeout.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
			break;
	}
}
824
/*
 * Hand a one-byte command to the out-of-band (DASH) management firmware.
 * The byte is pushed through the ERI interface, then an OCP write acts
 * as a doorbell so the firmware picks it up.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W8(ERIDR, cmd);
	/*
	 * Magic ERIAR value: write command targeting address 0xe8 —
	 * presumably the firmware mailbox; TODO confirm against datasheet.
	 */
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);
	/* Wait (5 x 100us) for the ERI write cycle to complete. */
	for (i = 0; i < 5; i++) {
		udelay(100);
		if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
			break;
	}

	/* Doorbell: tell the OOB firmware a command is pending. */
	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
841
842 #define OOB_CMD_RESET 0x00
843 #define OOB_CMD_DRIVER_START 0x05
844 #define OOB_CMD_DRIVER_STOP 0x06
845
846 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
847 {
848 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
849 }
850
851 static void rtl8168_driver_start(struct rtl8169_private *tp)
852 {
853 u16 reg;
854 int i;
855
856 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
857
858 reg = rtl8168_get_ocp_reg(tp);
859
860 for (i = 0; i < 10; i++) {
861 msleep(10);
862 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
863 break;
864 }
865 }
866
867 static void rtl8168_driver_stop(struct rtl8169_private *tp)
868 {
869 u16 reg;
870 int i;
871
872 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
873
874 reg = rtl8168_get_ocp_reg(tp);
875
876 for (i = 0; i < 10; i++) {
877 msleep(10);
878 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
879 break;
880 }
881 }
882
883 static int r8168dp_check_dash(struct rtl8169_private *tp)
884 {
885 u16 reg = rtl8168_get_ocp_reg(tp);
886
887 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
888 }
889
/*
 * Write @value to PHY register @reg_addr through the indirect PHYAR
 * MMIO interface (8169-family MDIO access).  Bit 31 of PHYAR starts a
 * write cycle; the hardware clears it when the cycle completes.  Polls
 * up to 20 x 25us and gives up silently on timeout.
 */
static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	int i;

	RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));

	for (i = 20; i > 0; i--) {
		/*
		 * Check if the RTL8169 has completed writing to the specified
		 * MII register.
		 */
		if (!(RTL_R32(PHYAR) & 0x80000000))
			break;
		udelay(25);
	}
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
911
/*
 * Read PHY register @reg_addr through the indirect PHYAR MMIO
 * interface.  Writing the address with bit 31 clear starts a read
 * cycle; the hardware sets bit 31 when data is ready in the low 16
 * bits.  Returns -1 if the cycle does not complete within 20 x 25us.
 */
static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int i, value = -1;

	RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);

	for (i = 20; i > 0; i--) {
		/*
		 * Check if the RTL8169 has completed retrieving data from
		 * the specified MII register.
		 */
		if (RTL_R32(PHYAR) & 0x80000000) {
			value = RTL_R32(PHYAR) & 0xffff;
			break;
		}
		udelay(25);
	}
	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
937
/*
 * Issue one MDIO transaction on the first-generation 8168dp, which
 * routes PHY access through the OCP GPHY window (OCPDR/OCPAR) instead
 * of PHYAR.  @data carries the command bit plus (for writes) the value;
 * the PHY register number is merged in here.  Completion is polled for
 * up to 100 x 1ms via OCPAR_FLAG clearing.
 */
static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
{
	int i;

	RTL_W32(OCPDR, data |
		((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	/* NOTE(review): purpose of zeroing EPHY_RXER_NUM here is unclear —
	 * kept as part of the documented access sequence. */
	RTL_W32(EPHY_RXER_NUM, 0);

	for (i = 0; i < 100; i++) {
		mdelay(1);
		if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
			break;
	}
}
953
954 static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
955 {
956 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
957 (value & OCPDR_DATA_MASK));
958 }
959
/*
 * MDIO read for the first-generation 8168dp.  A read is a two-step
 * sequence: first the read command is issued through the common access
 * helper, then a separate GPHY read cycle fetches the data into OCPDR.
 * Polls up to 100 x 1ms for OCPAR_FLAG before reading the result;
 * on timeout whatever is in OCPDR is returned.
 */
static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int i;

	r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	for (i = 0; i < 100; i++) {
		mdelay(1);
		/* Flag set => read data is valid. */
		if (RTL_R32(OCPAR) & OCPAR_FLAG)
			break;
	}

	return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
}
978
979 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
980
981 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
982 {
983 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
984 }
985
986 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
987 {
988 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
989 }
990
991 static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
992 {
993 r8168dp_2_mdio_start(ioaddr);
994
995 r8169_mdio_write(ioaddr, reg_addr, value);
996
997 r8168dp_2_mdio_stop(ioaddr);
998 }
999
1000 static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
1001 {
1002 int value;
1003
1004 r8168dp_2_mdio_start(ioaddr);
1005
1006 value = r8169_mdio_read(ioaddr, reg_addr);
1007
1008 r8168dp_2_mdio_stop(ioaddr);
1009
1010 return value;
1011 }
1012
1013 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1014 {
1015 tp->mdio_ops.write(tp->mmio_addr, location, val);
1016 }
1017
1018 static int rtl_readphy(struct rtl8169_private *tp, int location)
1019 {
1020 return tp->mdio_ops.read(tp->mmio_addr, location);
1021 }
1022
/* Read-modify-write: OR @value into PHY register @reg_addr. */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1027
/*
 * "Write-1 / write-0": set the bits in @p and clear the bits in @m of
 * PHY register @reg_addr (set wins if a bit is in both masks).
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (cur | p) & ~m);
}
1035
/* mii_if_info write callback; phy_id is ignored (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *priv = netdev_priv(dev);

	rtl_writephy(priv, location, val);
}
1043
/* mii_if_info read callback; phy_id is ignored (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *priv = netdev_priv(dev);

	return rtl_readphy(priv, location);
}
1050
/*
 * Write @value to PCIe PHY (EPHY) register @reg_addr via the indirect
 * EPHYAR interface.  Polls up to 100 x 10us for the busy flag to clear;
 * gives up silently on timeout.
 */
static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
{
	unsigned int i;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
			break;
		udelay(10);
	}
}
1064
/*
 * Read PCIe PHY (EPHY) register @reg_addr via the indirect EPHYAR
 * interface.  The hardware sets EPHYAR_FLAG when the data is ready.
 * Returns 0xffff if the cycle does not complete within 100 x 10us.
 */
static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
{
	u16 value = 0xffff;
	unsigned int i;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
			value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
			break;
		}
		udelay(10);
	}

	return value;
}
1082
/*
 * Write @value to the PCIe configuration space image (CSI) at @addr
 * via the indirect CSIDR/CSIAR pair, with all four byte enables set.
 * Polls up to 100 x 10us for the busy flag to clear; gives up silently
 * on timeout.
 */
static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
{
	unsigned int i;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
			break;
		udelay(10);
	}
}
1097
/*
 * Read a dword from the PCIe configuration space image (CSI) at @addr.
 * Returns ~0 (all ones) if the cycle does not complete within
 * 100 x 10us — the conventional "read failed" value for config space.
 */
static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
{
	u32 value = ~0x00;
	unsigned int i;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
			value = RTL_R32(CSIDR);
			break;
		}
		udelay(10);
	}

	return value;
}
1116
/*
 * Write @val to ERI (extended register interface) address @addr.
 *
 * @mask: byte-enable mask (ERIAR_MASK_*), must be non-zero.
 * @type: register space selector (ERIAR_EXGMAC/ERIAR_MSIX/ERIAR_ASF).
 *
 * @addr must be dword-aligned — enforced by the BUG_ON.  Polls up to
 * 100 x 100us for completion; gives up silently on timeout.
 */
static
void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
{
	unsigned int i;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
			break;
		udelay(100);
	}
}
1132
/*
 * Read ERI register @addr from block @type. Always reads all four byte
 * lanes (ERIAR_MASK_1111). Polls up to 10ms; returns ~0 on timeout.
 */
static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
{
	u32 value = ~0x00;
	unsigned int i;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	for (i = 0; i < 100; i++) {
		/* Flag set means ERIDR holds valid read data. */
		if (RTL_R32(ERIAR) & ERIAR_FLAG) {
			value = RTL_R32(ERIDR);
			break;
		}
		udelay(100);
	}

	return value;
}
1150
1151 static void
1152 rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
1153 {
1154 u32 val;
1155
1156 val = rtl_eri_read(ioaddr, addr, type);
1157 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
1158 }
1159
/*
 * One EXGMAC register write (address, byte-enable mask, value), as
 * consumed by rtl_write_exgmac_batch().
 */
struct exgmac_reg {
	u16 addr;
	u16 mask;
	u32 val;
};
1165
1166 static void rtl_write_exgmac_batch(void __iomem *ioaddr,
1167 const struct exgmac_reg *r, int len)
1168 {
1169 while (len-- > 0) {
1170 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1171 r++;
1172 }
1173 }
1174
/*
 * Read one byte from the 8168D's efuse at @reg_addr. Polls up to 30ms
 * (300 x 100us) for the data-ready flag; returns 0xff on timeout.
 */
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
	u8 value = 0xff;
	unsigned int i;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	for (i = 0; i < 300; i++) {
		/* Flag set means the efuse data is valid. */
		if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
			value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
			break;
		}
		udelay(100);
	}

	return value;
}
1192
/* Snapshot the raw interrupt status bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1199
/*
 * Acknowledge (write-to-clear) the interrupt status bits in @bits.
 * mmiowb() orders the MMIO write before a subsequent unlock.
 */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}
1207
/* Mask all chip interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1215
/* Unmask exactly the interrupt sources in @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1222
/* Interrupt events serviced from the NAPI poll handler. */
#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)

/* Unmask the NAPI events plus the chip-specific slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1231
/*
 * Mask all interrupts and acknowledge any pending ones. The trailing
 * ChipCmd read flushes the posted MMIO writes before returning.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1240
/* Non-zero while a TBI (fiber) reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1247
/* Non-zero while the PHY's self-clearing soft-reset bit is still set. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1252
/* Non-zero when the TBI (fiber) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1257
/* Non-zero when the copper (xMII) link is up. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1262
/* Kick off a TBI (fiber) reset; completion is polled elsewhere. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1269
1270 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1271 {
1272 unsigned int val;
1273
1274 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1275 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1276 }
1277
/*
 * Apply chip-specific ERI fixups when the link state changes. The
 * per-speed 0x1bc/0x1dc values are undocumented vendor magic — do not
 * "simplify" them. On VER_34 the packet filter is additionally pulsed
 * (bit cleared then set at ERI 0xdc).
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
	}
}
1323
/*
 * Propagate the hardware link state to the network stack. When @pm is
 * set, link-up cancels any scheduled runtime suspend and link-down
 * schedules one 5s in the future.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1343
/* Link-state check without touching runtime PM. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1350
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)

/*
 * Read the current Wake-on-LAN configuration back from the Config1/3/5
 * registers as WAKE_* flags. Returns 0 when PME is disabled entirely.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1379
/* ethtool get_wol hook: report supported and active WoL options. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1391
/*
 * Program the WAKE_* options in @wolopts into the Config registers.
 * The cfg[] table maps each option to its register bit; LanWake is set
 * whenever any option is requested. Which register carries the PME
 * enable depends on the chip generation. Config registers are only
 * writable between Cfg9346 unlock/lock.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		/* Older chips: PME enable lives in Config1. */
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		/* Newer chips: PME signalling lives in Config2. */
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1436
/*
 * ethtool set_wol hook: program the hardware, mirror the choice in
 * tp->features and tell the PM core whether the device may wake the
 * system.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
1455
/* Firmware file name for this chip, or NULL if none is needed. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1460
/*
 * ethtool get_drvinfo hook. The firmware version is reported only when
 * a firmware image was actually loaded (tp->rtl_fw may hold an ERR_PTR
 * or NULL sentinel).
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1475
/* ethtool get_regs_len hook: size of the register dump. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1480
1481 static int rtl8169_set_speed_tbi(struct net_device *dev,
1482 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1483 {
1484 struct rtl8169_private *tp = netdev_priv(dev);
1485 void __iomem *ioaddr = tp->mmio_addr;
1486 int ret = 0;
1487 u32 reg;
1488
1489 reg = RTL_R32(TBICSR);
1490 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1491 (duplex == DUPLEX_FULL)) {
1492 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1493 } else if (autoneg == AUTONEG_ENABLE)
1494 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1495 else {
1496 netif_warn(tp, link, dev,
1497 "incorrect speed setting refused in TBI mode\n");
1498 ret = -EOPNOTSUPP;
1499 }
1500
1501 return ret;
1502 }
1503
/*
 * Speed setter for copper (xMII) chips: program MII advertisement and
 * BMCR for either autonegotiation over @adv or a forced 10/100 mode.
 * Returns 0 on success, -EINVAL for unsupported combinations (forced
 * 1000, or gigabit advertisement on a Fast-Ethernet-only PHY).
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY register page 0. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		/* Rebuild the 10/100 advertisement from @adv. */
		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		/* Forced mode: only 10 or 100 Mbps are supported. */
		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		/* Vendor magic for 8169s/sb — register meaning is not
		 * public; values differ for forced-100 vs. other modes.
		 */
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1582
/*
 * Dispatch to the chip-specific speed setter (TBI or xMII). When
 * gigabit autonegotiation is requested on a running interface, re-arm
 * the PHY timer which re-checks/restarts negotiation later.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
1600
/*
 * ethtool set_settings hook. The PHY timer is stopped first so it
 * cannot race with the reconfiguration done under the work lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1615
1616 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1617 netdev_features_t features)
1618 {
1619 struct rtl8169_private *tp = netdev_priv(dev);
1620
1621 if (dev->mtu > TD_MSS_MAX)
1622 features &= ~NETIF_F_ALL_TSO;
1623
1624 if (dev->mtu > JUMBO_1K &&
1625 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1626 features &= ~NETIF_F_IP_CSUM;
1627
1628 return features;
1629 }
1630
1631 static void __rtl8169_set_features(struct net_device *dev,
1632 netdev_features_t features)
1633 {
1634 struct rtl8169_private *tp = netdev_priv(dev);
1635 netdev_features_t changed = features ^ dev->features;
1636 void __iomem *ioaddr = tp->mmio_addr;
1637
1638 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1639 return;
1640
1641 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1642 if (features & NETIF_F_RXCSUM)
1643 tp->cp_cmd |= RxChkSum;
1644 else
1645 tp->cp_cmd &= ~RxChkSum;
1646
1647 if (dev->features & NETIF_F_HW_VLAN_RX)
1648 tp->cp_cmd |= RxVlan;
1649 else
1650 tp->cp_cmd &= ~RxVlan;
1651
1652 RTL_W16(CPlusCmd, tp->cp_cmd);
1653 RTL_R16(CPlusCmd);
1654 }
1655 if (changed & NETIF_F_RXALL) {
1656 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1657 if (features & NETIF_F_RXALL)
1658 tmp |= (AcceptErr | AcceptRunt);
1659 RTL_W32(RxConfig, tmp);
1660 }
1661 }
1662
/* ndo_set_features hook: apply features under the driver work lock. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
1674
1675
1676 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1677 struct sk_buff *skb)
1678 {
1679 return (vlan_tx_tag_present(skb)) ?
1680 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1681 }
1682
/*
 * Extract a hardware-stripped VLAN tag from the Rx descriptor into
 * @skb. The chip stores the TCI byte-swapped, hence swab16(). opts2 is
 * cleared afterwards so the recycled descriptor starts clean.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}
1692
/*
 * ethtool get_settings for TBI (fiber) chips: always 1000/Full over
 * fibre; only the autoneg state is read back from TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1713
/* ethtool get_settings for copper chips: delegate to the mii library. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1720
/* ethtool get_settings hook: chip-specific getter under the work lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1732
/* ethtool get_regs hook: snapshot the MMIO register window into @p. */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
1745
/* ethtool get_msglevel hook. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1752
/* ethtool set_msglevel hook. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1759
/*
 * ethtool statistics names. Order must match the data[] layout filled
 * in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1775
1776 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1777 {
1778 switch (sset) {
1779 case ETH_SS_STATS:
1780 return ARRAY_SIZE(rtl8169_gstrings);
1781 default:
1782 return -EOPNOTSUPP;
1783 }
1784 }
1785
/*
 * Ask the chip to DMA its hardware tally counters into a coherent
 * buffer and cache them in tp->counters. The dump is started by writing
 * the buffer's bus address (high dword first) plus the CounterDump bit,
 * and completion is polled via that same bit (up to ~10ms). On timeout
 * the cached counters are simply left unchanged.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;
	int wait = 1000;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	while (wait--) {
		/* CounterDump self-clears when the DMA has finished. */
		if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
			memcpy(&tp->counters, counters, sizeof(*counters));
			break;
		}
		udelay(10);
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1825
/*
 * ethtool get_ethtool_stats hook: refresh the hardware tally counters
 * and unpack them into @data. Index order must match rtl8169_gstrings.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1849
1850 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1851 {
1852 switch(stringset) {
1853 case ETH_SS_STATS:
1854 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1855 break;
1856 }
1857 }
1858
/* ethtool entry points for both TBI and xMII chips. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
1875
/*
 * Identify the chip from the hardware-version bits of TxConfig. The
 * table is searched top-down and the FIRST matching (mask, val) pair
 * wins, so entries with more specific masks must precede the looser
 * family catch-alls. Unknown chips fall back to @default_version.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168F family. */
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	/* The catch-all entry guarantees the scan terminates. */
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
1979
/* Debug-only dump of the detected MAC version (no-op unless RTL8169_DEBUG). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1984
/* One PHY register write (register, value) for rtl_writephy_batch(). */
struct phy_reg {
	u16 reg;
	u16 val;
};
1989
1990 static void rtl_writephy_batch(struct rtl8169_private *tp,
1991 const struct phy_reg *regs, int len)
1992 {
1993 while (len-- > 0) {
1994 rtl_writephy(tp, regs->reg, regs->val);
1995 regs++;
1996 }
1997 }
1998
/*
 * Opcodes of the PHY firmware bytecode interpreted by
 * rtl_phy_write_fw(). Instruction encoding (see the interpreter):
 * bits 31-28 opcode, bits 27-16 register/offset operand, bits 15-0
 * immediate data. The *_MAC_BYTE and WRITE_ERI_WORD opcodes are
 * defined but rejected as invalid by rtl_fw_data_ok().
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_READ_EFUSE		0x40000000
#define PHY_READ_MAC_BYTE	0x50000000
#define PHY_WRITE_MAC_BYTE	0x60000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
#define PHY_WRITE_ERI_WORD	0xf0000000
2015
/*
 * On-disk header of the new-style firmware container; followed by the
 * opcode array at @fw_start. The whole file sums (mod 256) to zero via
 * @chksum. Old-style images have no header (see rtl_fw_format_ok()).
 */
struct fw_info {
	u32	magic;
	char	version[RTL_VER_SIZE];
	__le32	fw_start;
	__le32	fw_len;
	u8	chksum;
} __packed;
2023
/* Size in bytes of one firmware bytecode instruction. */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))

/*
 * Validate the firmware container and locate the opcode array.
 * A zero @magic marks the new headered format: the whole file must
 * checksum to zero and the (start, len) window must lie within the
 * blob. A non-zero first word is treated as the old raw format — just
 * an array of opcodes. On success pa->code/pa->size and the version
 * string are filled in; returns false on any malformed input.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* Sum of all bytes (header included) must be zero. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* fw_len counts opcodes, not bytes. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* Old raw format: must be a whole number of opcodes. */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2076
/*
 * Statically verify the firmware bytecode before running it: every
 * opcode must be one the interpreter implements, and every jump/skip
 * target must stay inside the opcode array. Returns false (with an
 * error logged) on the first violation.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump by @regno must not precede index 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May skip the next instruction (index + 2). */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip @regno instructions forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2135
2136 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2137 {
2138 struct net_device *dev = tp->dev;
2139 int rc = -EINVAL;
2140
2141 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2142 netif_err(tp, ifup, dev, "invalid firwmare\n");
2143 goto out;
2144 }
2145
2146 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2147 rc = 0;
2148 out:
2149 return rc;
2150 }
2151
/*
 * Interpret the firmware's PHY bytecode (validated beforehand by
 * rtl_fw_data_ok()). State carried between instructions: @predata, the
 * last value read (PHY or efuse), and @count, the number of PHY reads
 * since the last PHY_CLEAR_READCOUNT. Execution stops at the end of
 * the array or at an all-zero instruction.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		/* All-zero instruction terminates the program early. */
		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump by @regno instructions. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next instruction when count == data. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			/* Rejected by rtl_fw_data_ok() — cannot happen. */
			BUG();
		}
	}
}
2230
/*
 * Drop any loaded firmware and reset tp->rtl_fw to the "not yet
 * looked up" sentinel. Safe to call with the ERR_PTR/NULL sentinels.
 */
static void rtl_release_firmware(struct rtl8169_private *tp)
{
	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
		release_firmware(tp->rtl_fw->fw);
		kfree(tp->rtl_fw);
	}
	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
}
2239
/* Run the loaded PHY firmware, if any (sentinels mean "none"). */
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		rtl_phy_write_fw(tp, rtl_fw);
}
2248
/*
 * Apply the firmware only if PHY register @reg currently reads @val;
 * otherwise warn that the chipset is not in the expected state.
 */
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
{
	if (rtl_readphy(tp, reg) != val)
		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
	else
		rtl_apply_firmware(tp);
}
2256
/*
 * PHY initialisation for the 8169s. The register/value pairs are
 * undocumented vendor magic (page selects via reg 0x1f interleaved
 * with calibration writes) — keep the sequence and its order intact.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2323
/*
 * PHY setup for the RTL8169sb (MAC ver 04): a single vendor-provided
 * write on page 2, then back to page 0.
 */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2334
2335 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2336 {
2337 struct pci_dev *pdev = tp->pci_dev;
2338
2339 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2340 (pdev->subsystem_device != 0xe000))
2341 return;
2342
2343 rtl_writephy(tp, 0x1f, 0x0001);
2344 rtl_writephy(tp, 0x10, 0xf01b);
2345 rtl_writephy(tp, 0x1f, 0x0000);
2346 }
2347
/*
 * PHY setup for the RTL8169scd (MAC ver 05): vendor-provided register
 * write sequence, followed by the Gigabyte board quirk.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2394
/*
 * PHY setup for the RTL8169sce (MAC ver 06): vendor-provided register
 * write sequence (opaque tuning constants).
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2447
/*
 * PHY setup for the RTL8168bb (MAC ver 11): set bit 0 of reg 0x16 on
 * page 1, then replay the remaining vendor writes.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2460
/*
 * PHY setup for the RTL8168b ef variant (used for MAC ver 12 and 17):
 * single vendor write to reg 0x10 on page 1.
 */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2471
/*
 * PHY setup for the RTL8168cp rev 1 (MAC ver 18): vendor-provided
 * writes on pages 0 and 2.
 */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2484
/*
 * PHY setup for the RTL8168cp rev 2 (MAC ver 23/24): set bit 5 of
 * regs 0x14 and 0x0d on page 0 first, then the remaining writes.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2499
/*
 * PHY setup for the RTL8168c rev 1 (MAC ver 19): vendor write batch,
 * then bit-set patches on regs 0x14/0x0d before returning to page 0.
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2528
/*
 * PHY setup for the RTL8168c rev 2 (MAC ver 20): vendor write batch,
 * then bit-set patches on regs 0x16/0x14/0x0d before page 0.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2556
/*
 * PHY setup for the RTL8168c rev 3 (MAC ver 21, reused for ver 22):
 * shorter variant of the rev 2 sequence with the same trailing patches.
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2578
/* MAC ver 22 uses the same PHY setup as the 8168c rev 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2583
/*
 * PHY setup for the RTL8168d rev 1 (MAC ver 25).  The sequence is
 * strictly ordered: base tuning batch, Rx error fix, an efuse-dependent
 * branch, RSET/PLL tweaks, then conditional firmware load.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	/* Efuse byte 0x01 selects between two tuning variants. */
	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/*
		 * Unless the low byte already reads 0x6c, step it through
		 * 0x65..0x6c while keeping the high byte.
		 */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if MII_EXPANSION reads back 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2695
/*
 * PHY setup for the RTL8168d rev 2 (MAC ver 26).  Same structure as
 * rev 1 but without the Rx-error / RSET tweaks, different else-branch
 * constants, and a different firmware trigger value (0xb300).
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Efuse byte 0x01 selects between two tuning variants. */
	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/*
		 * Unless the low byte already reads 0x6c, step it through
		 * 0x65..0x6c while keeping the high byte.
		 */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if MII_EXPANSION reads back 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2798
/*
 * PHY setup for the RTL8168d rev 3 (MAC ver 27): a single batched
 * vendor-provided register write sequence, no firmware involved.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2859
/*
 * PHY setup for the RTL8168d rev 4 (MAC ver 28): short write batch
 * plus setting bit 5 of reg 0x0d.
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
2875
/*
 * PHY setup for the RTL8168e rev 1 (MAC ver 32/33): load firmware
 * first, then replay the write batch and a series of read-modify-write
 * (rtl_w1w0_phy: set-bits, clear-bits) tweaks.  Order matters.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE-related writes via regs 0x0d/0x0e - presumably MMD access;
	 * TODO confirm against IEEE 802.3 Clause 45-over-22 convention. */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
2948
/*
 * PHY setup for the RTL8168e rev 2 (MAC ver 34): firmware load, write
 * batch, then ordered read-modify-write tweaks including EEE setup via
 * the ERI interface.  Order matters.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
		     ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3033
/*
 * PHY setup for the RTL8168f rev 1 (MAC ver 35): firmware load, green
 * table / channel estimation write batch, then ordered
 * read-modify-write tweaks.  Order matters.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3100
/*
 * PHY setup for the RTL8168f rev 2 (MAC ver 36): firmware load plus a
 * subset of the rev 1 read-modify-write tweaks (no write batch).
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3124
/*
 * PHY setup for the RTL8102e (MAC ver 07/08/09): bit-set patches on
 * regs 0x11/0x19/0x10, then a short vendor write batch.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3141
/*
 * PHY setup for the RTL8105e (MAC ver 29/30): ALDPS must be disabled
 * (with a 100 ms settle delay) before loading the PHY ram code, then
 * the vendor write batch is replayed.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3167
3168 static void rtl_hw_phy_config(struct net_device *dev)
3169 {
3170 struct rtl8169_private *tp = netdev_priv(dev);
3171
3172 rtl8169_print_mac_version(tp);
3173
3174 switch (tp->mac_version) {
3175 case RTL_GIGA_MAC_VER_01:
3176 break;
3177 case RTL_GIGA_MAC_VER_02:
3178 case RTL_GIGA_MAC_VER_03:
3179 rtl8169s_hw_phy_config(tp);
3180 break;
3181 case RTL_GIGA_MAC_VER_04:
3182 rtl8169sb_hw_phy_config(tp);
3183 break;
3184 case RTL_GIGA_MAC_VER_05:
3185 rtl8169scd_hw_phy_config(tp);
3186 break;
3187 case RTL_GIGA_MAC_VER_06:
3188 rtl8169sce_hw_phy_config(tp);
3189 break;
3190 case RTL_GIGA_MAC_VER_07:
3191 case RTL_GIGA_MAC_VER_08:
3192 case RTL_GIGA_MAC_VER_09:
3193 rtl8102e_hw_phy_config(tp);
3194 break;
3195 case RTL_GIGA_MAC_VER_11:
3196 rtl8168bb_hw_phy_config(tp);
3197 break;
3198 case RTL_GIGA_MAC_VER_12:
3199 rtl8168bef_hw_phy_config(tp);
3200 break;
3201 case RTL_GIGA_MAC_VER_17:
3202 rtl8168bef_hw_phy_config(tp);
3203 break;
3204 case RTL_GIGA_MAC_VER_18:
3205 rtl8168cp_1_hw_phy_config(tp);
3206 break;
3207 case RTL_GIGA_MAC_VER_19:
3208 rtl8168c_1_hw_phy_config(tp);
3209 break;
3210 case RTL_GIGA_MAC_VER_20:
3211 rtl8168c_2_hw_phy_config(tp);
3212 break;
3213 case RTL_GIGA_MAC_VER_21:
3214 rtl8168c_3_hw_phy_config(tp);
3215 break;
3216 case RTL_GIGA_MAC_VER_22:
3217 rtl8168c_4_hw_phy_config(tp);
3218 break;
3219 case RTL_GIGA_MAC_VER_23:
3220 case RTL_GIGA_MAC_VER_24:
3221 rtl8168cp_2_hw_phy_config(tp);
3222 break;
3223 case RTL_GIGA_MAC_VER_25:
3224 rtl8168d_1_hw_phy_config(tp);
3225 break;
3226 case RTL_GIGA_MAC_VER_26:
3227 rtl8168d_2_hw_phy_config(tp);
3228 break;
3229 case RTL_GIGA_MAC_VER_27:
3230 rtl8168d_3_hw_phy_config(tp);
3231 break;
3232 case RTL_GIGA_MAC_VER_28:
3233 rtl8168d_4_hw_phy_config(tp);
3234 break;
3235 case RTL_GIGA_MAC_VER_29:
3236 case RTL_GIGA_MAC_VER_30:
3237 rtl8105e_hw_phy_config(tp);
3238 break;
3239 case RTL_GIGA_MAC_VER_31:
3240 /* None. */
3241 break;
3242 case RTL_GIGA_MAC_VER_32:
3243 case RTL_GIGA_MAC_VER_33:
3244 rtl8168e_1_hw_phy_config(tp);
3245 break;
3246 case RTL_GIGA_MAC_VER_34:
3247 rtl8168e_2_hw_phy_config(tp);
3248 break;
3249 case RTL_GIGA_MAC_VER_35:
3250 rtl8168f_1_hw_phy_config(tp);
3251 break;
3252 case RTL_GIGA_MAC_VER_36:
3253 rtl8168f_2_hw_phy_config(tp);
3254 break;
3255
3256 default:
3257 break;
3258 }
3259 }
3260
/*
 * PHY maintenance worker: while a PHY reset is still pending, re-arm
 * the timer with a short delay; once the reset has completed, keep
 * re-triggering the reset until the link comes up.  Not used for
 * MAC ver 01 (see assert).
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		return;	/* link is up: stop re-arming the timer */

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3288
3289 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3290 {
3291 if (!test_and_set_bit(flag, tp->wk.flags))
3292 schedule_work(&tp->wk.work);
3293 }
3294
/* Timer callback: defer the actual PHY work to process context. */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
3302
/*
 * Tear down the board resources: unmap MMIO, release the PCI regions,
 * clear MWI, disable the PCI device and free the netdev.  The netdev
 * is freed last since @ioaddr/@pdev come from its private data.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3312
3313 static void rtl8169_phy_reset(struct net_device *dev,
3314 struct rtl8169_private *tp)
3315 {
3316 unsigned int i;
3317
3318 tp->phy_reset_enable(tp);
3319 for (i = 0; i < 100; i++) {
3320 if (!tp->phy_reset_pending(tp))
3321 return;
3322 msleep(1);
3323 }
3324 netif_err(tp, link, dev, "PHY reset failed\n");
3325 }
3326
3327 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3328 {
3329 void __iomem *ioaddr = tp->mmio_addr;
3330
3331 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3332 (RTL_R8(PHYstatus) & TBI_Enable);
3333 }
3334
/*
 * Bring up the PHY: run the chip-specific config, apply PCI latency /
 * cache-line and legacy register fixups for the early chips, reset the
 * PHY and start autonegotiation advertising all supported speeds.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 1000 Mb/s only when the PHY supports GMII. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3370
/*
 * Program the hardware unicast address registers (MAC0/MAC4) from
 * @addr.  The config registers are unlocked around the write and each
 * write is read back - presumably to flush posted writes; confirm.
 * MAC ver 34 additionally mirrors the address into the ExGMAC regs.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	/* Pack the 6-byte address: bytes 0-3 -> low, bytes 4-5 -> high. */
	low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		const struct exgmac_reg e[] = {
			{ .addr = 0xe0, ERIAR_MASK_1111, .val = low },
			{ .addr = 0xe4, ERIAR_MASK_1111, .val = high },
			{ .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
			{ .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
								low  >> 16 },
		};

		rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3406
3407 static int rtl_set_mac_address(struct net_device *dev, void *p)
3408 {
3409 struct rtl8169_private *tp = netdev_priv(dev);
3410 struct sockaddr *addr = p;
3411
3412 if (!is_valid_ether_addr(addr->sa_data))
3413 return -EADDRNOTAVAIL;
3414
3415 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3416
3417 rtl_rar_set(tp, dev->dev_addr);
3418
3419 return 0;
3420 }
3421
3422 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3423 {
3424 struct rtl8169_private *tp = netdev_priv(dev);
3425 struct mii_ioctl_data *data = if_mii(ifr);
3426
3427 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3428 }
3429
3430 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3431 struct mii_ioctl_data *data, int cmd)
3432 {
3433 switch (cmd) {
3434 case SIOCGMIIPHY:
3435 data->phy_id = 32; /* Internal PHY */
3436 return 0;
3437
3438 case SIOCGMIIREG:
3439 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3440 return 0;
3441
3442 case SIOCSMIIREG:
3443 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3444 return 0;
3445 }
3446 return -EOPNOTSUPP;
3447 }
3448
/* MII ioctls are not supported on the TBI variant: always refuse. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3453
3454 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3455 {
3456 if (tp->features & RTL_FEATURE_MSI) {
3457 pci_disable_msi(pdev);
3458 tp->features &= ~RTL_FEATURE_MSI;
3459 }
3460 }
3461
/*
 * Select the MDIO read/write accessors for this chip: MAC ver 27 and
 * ver 28/31 use the two 8168dp-specific access methods, everything
 * else the plain r8169 MDIO interface.
 */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write = r8168dp_1_mdio_write;
		ops->read = r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write = r8168dp_2_mdio_write;
		ops->read = r8168dp_2_mdio_read;
		break;
	default:
		ops->write = r8169_mdio_write;
		ops->read = r8169_mdio_read;
		break;
	}
}
3482
/*
 * Wake-on-LAN suspend quirk: on MAC ver 29/30/32/33/34 keep accepting
 * broadcast, multicast and directed frames while suspended; other
 * versions are left untouched.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3500
3501 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3502 {
3503 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3504 return false;
3505
3506 rtl_writephy(tp, 0x1f, 0x0000);
3507 rtl_writephy(tp, MII_BMCR, 0x0000);
3508
3509 rtl_wol_suspend_quirk(tp);
3510
3511 return true;
3512 }
3513
/* Power the internal PHY down: select page 0, then set BMCR power-down. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3519
/* Power the internal PHY up: select page 0, then enable autonegotiation. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3525
/*
 * 810x PLL power-down.  If WoL is armed, the WoL-specific sequence is
 * used and nothing more is done.  Otherwise the PHY is powered down and,
 * on chips that support it, PMCH bit 7 is cleared as well (the listed
 * early versions are left untouched).
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3548
/*
 * 810x PLL power-up: bring the PHY back up, then set PMCH bit 7 on chips
 * that have it (mirror image of r810x_pll_power_down).
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3568
/*
 * 8168 PHY power-up.  On the listed versions an extra write to PHY
 * register 0x0e (cleared to 0) is required before re-enabling
 * autonegotiation; other versions only need the BMCR write.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3595
/*
 * 8168 PHY power-down.  VER_32/33 keep autoneg enabled while powering
 * down; the long list of versions first writes 0x0200 to PHY reg 0x0e
 * and then deliberately falls through to the plain BMCR_PDOWN write
 * used by everything else.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - these versions also need BMCR_PDOWN */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3626
/*
 * 8168 PLL power-down.  Skipped entirely when a DASH management agent
 * (8168DP family) or ASF (VER_23/24) is active, since those must keep
 * the chip alive.  Otherwise: chip-specific EPHY tweak for VER_32/33,
 * then either the WoL power-down path or a full PHY + PMCH power-down.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(ioaddr, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3665
/*
 * 8168 PLL power-up: set PMCH bit 7 on the versions that had it cleared
 * by r8168_pll_power_down, then bring the PHY back up.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
3684
/* Invoke an optional chip-specific hook; a NULL hook means "no-op". */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
3691
/* Dispatch the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
3696
/* Dispatch the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
3701
/*
 * Select the PLL power management hooks for the detected chip: 810x-class
 * parts get the r810x pair, 8168-class parts the r8168 pair, and chips
 * with no PLL power control get NULL hooks (rtl_generic_op skips those).
 */
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		ops->down = r810x_pll_power_down;
		ops->up = r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		ops->down = r8168_pll_power_down;
		ops->up = r8168_pll_power_up;
		break;

	default:
		ops->down = NULL;
		ops->up = NULL;
		break;
	}
}
3748
/*
 * Program the base RxConfig value (FIFO threshold / interrupt coalescing
 * bits and DMA burst size) for the detected chip generation.  The accept
 * bits are managed separately by rtl_set_rx_mode()/rtl_rx_close().
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
3784
3785 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3786 {
3787 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
3788 }
3789
/*
 * Enable jumbo frames: the chip-specific hook runs with the config
 * registers unlocked (Cfg9346) and they are relocked afterwards.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
3798
/*
 * Disable jumbo frames: the chip-specific hook runs with the config
 * registers unlocked (Cfg9346) and they are relocked afterwards.
 */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
3807
/*
 * 8168C jumbo enable: set both jumbo enable bits and lower the PCIe max
 * read request size (0x2 encoding) for jumbo operation.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
3816
/*
 * 8168C jumbo disable: clear both jumbo enable bits and restore the
 * normal PCIe max read request size (0x5 encoding).
 */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
3825
/* 8168DP jumbo enable: a single Config3 bit is all this family needs. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
3832
/* 8168DP jumbo disable: clear the single Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
3839
/*
 * 8168E jumbo enable: raise the max Tx packet size, set the jumbo bits
 * in Config3/Config4 and lower the PCIe max read request size.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
3849
/*
 * 8168E jumbo disable: restore the normal max Tx packet size, clear the
 * jumbo bits and restore the normal PCIe max read request size.
 */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
3859
/* 8168B rev 0 jumbo enable: only the PCIe DevCtl tweak is needed. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
3865
/* 8168B rev 0 jumbo disable: restore the normal PCIe DevCtl setting. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
3871
/* 8168B rev 1 jumbo enable: rev 0 sequence plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
3880
/* 8168B rev 1 jumbo disable: rev 0 sequence plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
3889
/*
 * Select the jumbo frame enable/disable hooks for the detected chip.
 * Chips that need no action (8169) or support no jumbo frames at all
 * (810x) get NULL hooks, which rtl_generic_op treats as no-ops.
 */
static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable = r8168b_0_hw_jumbo_disable;
		ops->enable = r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable = r8168b_1_hw_jumbo_disable;
		ops->enable = r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable = r8168c_hw_jumbo_disable;
		ops->enable = r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable = r8168dp_hw_jumbo_disable;
		ops->enable = r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable = r8168e_hw_jumbo_disable;
		ops->enable = r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	default:
		ops->disable = NULL;
		ops->enable = NULL;
		break;
	}
}
3939
3940 static void rtl_hw_reset(struct rtl8169_private *tp)
3941 {
3942 void __iomem *ioaddr = tp->mmio_addr;
3943 int i;
3944
3945 /* Soft reset the chip. */
3946 RTL_W8(ChipCmd, CmdReset);
3947
3948 /* Check that the chip has finished the reset. */
3949 for (i = 0; i < 100; i++) {
3950 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3951 break;
3952 udelay(100);
3953 }
3954 }
3955
/*
 * Look up, load and validate the firmware patch for this chip.
 * On success tp->rtl_fw points at the loaded blob; on any failure
 * (including "this chip needs no firmware") tp->rtl_fw is set to NULL,
 * and a warning is logged for real load/validation errors.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
3993
/*
 * Load the firmware only on the first attempt: tp->rtl_fw holds an
 * IS_ERR() sentinel until rtl_request_uncached_firmware() has run once
 * (which leaves either a valid blob or NULL).
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
3999
/* Stop packet reception by clearing all accept bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4006
/*
 * Quiesce and reset the chip: mask interrupts, close the receiver, wait
 * for the transmitter to drain using the mechanism appropriate to the
 * chip generation (NPQ poll for 8168DP, StopReq + TXCFG_EMPTY poll for
 * VER_34..36, plain StopReq + delay otherwise), then soft-reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		while (RTL_R8(TxPoll) & NPQ)
			udelay(20);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
			udelay(100);
	} else {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4034
/* Program TxConfig with the default DMA burst size and interframe gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4043
/*
 * Run the chip-specific hardware start routine, then unmask the
 * interrupt sources.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4052
/*
 * Point the chip at the Tx/Rx descriptor rings.  The high/low write
 * order is deliberate and must not be changed (see comment below).
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4066
/*
 * Read CPlusCmd and write the same value straight back, returning it.
 * NOTE(review): the write-back of an unchanged value looks like a
 * hardware latch/commit quirk - confirm before removing.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4075
/*
 * Program the Rx size filter one byte above the buffer size, effectively
 * disabling it (a tight limit hurts - see original comment).
 */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4081
4082 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4083 {
4084 static const struct rtl_cfg2_info {
4085 u32 mac_version;
4086 u32 clk;
4087 u32 val;
4088 } cfg2_info [] = {
4089 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4090 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4091 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4092 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4093 };
4094 const struct rtl_cfg2_info *p = cfg2_info;
4095 unsigned int i;
4096 u32 clk;
4097
4098 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4099 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4100 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4101 RTL_W32(0x7c, p->val);
4102 break;
4103 }
4104 }
4105 }
4106
/*
 * Program the receive filter from dev->flags and the multicast list:
 * promiscuous accepts everything, oversized/allmulti lists accept all
 * multicast, otherwise a CRC-based 64-bit hash filter is built.  The
 * MAR registers are written before RxConfig on purpose.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2]; /* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 CRC bits select one of 64 hash filter bits. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Chips after VER_06 expect the hash filter words byte-swapped. */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4156
/*
 * Bring up an 8169-class chip.  The sequence is order-sensitive: on the
 * earliest versions (VER_01..04) Tx/Rx must be enabled and TxConfig
 * programmed before the descriptor registers are written; on later
 * versions it happens afterwards.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4228
/*
 * Read-modify-write CSI register 0x070c: keep the low 24 bits and
 * replace the top byte with the supplied access-enable bits.
 */
static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
	rtl_csi_write(ioaddr, 0x070c, csi | bits);
}
4236
/* CSI access enable, variant 1 (top byte 0x17). */
static void rtl_csi_access_enable_1(void __iomem *ioaddr)
{
	rtl_csi_access_enable(ioaddr, 0x17000000);
}
4241
/* CSI access enable, variant 2 (top byte 0x27). */
static void rtl_csi_access_enable_2(void __iomem *ioaddr)
{
	rtl_csi_access_enable(ioaddr, 0x27000000);
}
4246
/* One EPHY fixup: at @offset, clear the bits in @mask, then set @bits. */
struct ephy_info {
	unsigned int offset;	/* EPHY register offset */
	u16 mask;		/* bits to clear before setting */
	u16 bits;		/* bits to set */
};
4252
4253 static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
4254 {
4255 u16 w;
4256
4257 while (len-- > 0) {
4258 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
4259 rtl_ephy_write(ioaddr, e->offset, w);
4260 e++;
4261 }
4262 }
4263
4264 static void rtl_disable_clock_request(struct pci_dev *pdev)
4265 {
4266 int cap = pci_pcie_cap(pdev);
4267
4268 if (cap) {
4269 u16 ctl;
4270
4271 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4272 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
4273 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4274 }
4275 }
4276
4277 static void rtl_enable_clock_request(struct pci_dev *pdev)
4278 {
4279 int cap = pci_pcie_cap(pdev);
4280
4281 if (cap) {
4282 u16 ctl;
4283
4284 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4285 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4286 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4287 }
4288 }
4289
/* CPlusCmd debug/ASF/flow-force bits cleared by the 8168 start quirks. */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4300
/*
 * 8168B "bb" start: disable beacon, clear the CPlusCmd quirk bits and
 * apply the PCIe DevCtl tweak (max read request + no-snoop).
 */
static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
{
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4310
/*
 * 8168B "bef" start: the "bb" sequence plus max Tx packet size and
 * clearing Config4 bit 0.
 */
static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8168bb(ioaddr, pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4319
/*
 * Common tail of the 8168C/CP start sequences: speed-down enable,
 * beacon off, PCIe tweak, CLKREQ off and CPlusCmd quirk-bit clearing.
 */
static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
{
	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4332
/* 8168CP rev 1 start: CSI enable, EPHY fixup table, then common tail. */
static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4349
/* 8168CP rev 2 start: no EPHY fixups, no speed-down/CLKREQ handling. */
static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4360
/* 8168CP rev 3 start: like rev 2 plus a DBG_REG tweak and Tx size. */
static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4376
/* 8168C rev 1 start: CSI enable, DBG_REG NAK fixes, EPHY fixups, tail. */
static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4393
/* 8168C rev 2 start: CSI enable, its own EPHY fixups, common tail. */
static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4407
/* 8168C rev 3 start: identical to the rev 2 sequence. */
static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8168c_2(ioaddr, pdev);
}
4412
/* 8168C rev 4 start: CSI enable plus the common tail, no EPHY fixups. */
static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4419
/*
 * 8168D start: CSI enable, CLKREQ off, Tx size, PCIe tweak and
 * CPlusCmd quirk-bit clearing.
 */
static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4432
/* 8168DP start: CSI enable (variant 1), PCIe tweak, Tx size, CLKREQ off. */
static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_1(ioaddr);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4443
4444 static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4445 {
4446 static const struct ephy_info e_info_8168d_4[] = {
4447 { 0x0b, ~0, 0x48 },
4448 { 0x19, 0x20, 0x50 },
4449 { 0x0c, ~0, 0x20 }
4450 };
4451 int i;
4452
4453 rtl_csi_access_enable_1(ioaddr);
4454
4455 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4456
4457 RTL_W8(MaxTxPacketSize, TxPacketMax);
4458
4459 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4460 const struct ephy_info *e = e_info_8168d_4 + i;
4461 u16 w;
4462
4463 w = rtl_ephy_read(ioaddr, e->offset);
4464 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
4465 }
4466
4467 rtl_enable_clock_request(pdev);
4468 }
4469
/*
 * 8168E rev 1 start: CSI enable, a long EPHY fixup table, PCIe tweak,
 * Tx size, CLKREQ off, a Tx-FIFO pointer reset pulse and SPI disable.
 */
static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4504
/*
 * 8168E rev 2 start: CSI enable, EPHY fixups, PCIe tweak, a batch of
 * ERI (extended register interface) writes, early-Tx size, CLKREQ off,
 * auto Tx FIFO, EEE LED frequency tweak and power-management bits.
 */
static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
		     ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4542
/*
 * 8168F rev 1 start: like the 8168E rev 2 sequence with a larger EPHY
 * fixup table and an extended set of ERI writes (including a 0xdc bit
 * toggle), then the same early-Tx/FIFO/EEE/power-management tail.
 */
static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
		     ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4585
/*
 * Common hardware bring-up for the 8168 family: generic CPlusCmd /
 * interrupt mitigation setup, a per-mac_version quirk dispatch, then
 * Tx/Rx engine enable and config lock.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Unlock Config registers for the duration of the setup. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* NOTE(review): "|=" folds the chip's current CPlusCmd bits into
	 * tp->cp_cmd as well - presumably intentional; confirm against
	 * later kernels before changing.
	 */
	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* Dummy read - presumably flushes the posted MMIO writes above;
	 * the value is intentionally discarded.
	 */
	RTL_R8(IntrMask);

	/* Chip-specific init quirks, keyed by detected mac_version. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(ioaddr, pdev);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(ioaddr, pdev);
		break;

	default:
		/* Should not happen: probe only accepts known versions. */
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	/* Enable the Tx/Rx engines only after all the setup above. */
	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4696
/*
 * CPlusCmd bits that must not be left set on 810x chips; cleared from
 * tp->cp_cmd before the write-back in rtl_hw_start_8101().
 */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4707
/* Chip-specific init for 8102e rev 1 (RTL_GIGA_MAC_VER_07). */
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* EPHY writes are applied strictly in table order; registers 0x01
	 * and 0x19 appear more than once - presumably a deliberate init
	 * sequence, do not deduplicate.
	 */
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Clear LEDS0 when both LED bits came back set. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
4738
/* Chip-specific init for 8102e rev 2 (RTL_GIGA_MAC_VER_09). */
static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
4748
/* 8102e rev 3 (RTL_GIGA_MAC_VER_08): rev 2 setup plus one EPHY tweak. */
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8102e_2(ioaddr, pdev);

	rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
}
4755
/* Chip-specific init for 8105e rev 1 (RTL_GIGA_MAC_VER_29). */
static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* Applied in table order by rtl_ephy_init(); register 0x19 is
	 * written several times on purpose.
	 */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
4780
/* 8105e rev 2 (RTL_GIGA_MAC_VER_30): rev 1 setup plus one EPHY bit. */
static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8105e_1(ioaddr, pdev);
	rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
}
4786
/*
 * Common hardware bring-up for the 8101/810x family: event mask and
 * PCIe quirks, per-mac_version init, then Tx/Rx engine enable.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* 8105e and later do not need the RxFIFOOver slow event. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	/* Enable PCIe no-snoop on the chips that require it. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap) {
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(ioaddr, pdev);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(ioaddr, pdev);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	/* No interrupt mitigation on 810x. */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Dummy read - presumably flushes the posted MMIO writes above. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
4851
4852 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
4853 {
4854 struct rtl8169_private *tp = netdev_priv(dev);
4855
4856 if (new_mtu < ETH_ZLEN ||
4857 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
4858 return -EINVAL;
4859
4860 if (new_mtu > ETH_DATA_LEN)
4861 rtl_hw_jumbo_enable(tp);
4862 else
4863 rtl_hw_jumbo_disable(tp);
4864
4865 dev->mtu = new_mtu;
4866 netdev_update_features(dev);
4867
4868 return 0;
4869 }
4870
/*
 * Poison an rx descriptor so the NIC cannot use it: plant a bogus DMA
 * address and clear DescOwn, returning ownership to the host.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
4876
/*
 * Release one rx buffer: unmap its DMA mapping first, then free the
 * backing memory, NULL the slot and poison the descriptor.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
4887
/*
 * Hand an rx descriptor (back) to the NIC: set DescOwn and the buffer
 * size while preserving the descriptor's RingEnd bit.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
4894
/*
 * Publish a freshly mapped rx buffer to the NIC. The wmb() orders the
 * address store before the DescOwn store so the chip never sees an
 * owned descriptor with a stale address.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
4902
/*
 * Round a buffer pointer up to the next 16-byte boundary; rx buffers
 * handed to the DMA engine must be 16-byte aligned.
 */
static inline void *rtl8169_align(void *data)
{
	unsigned long addr = (unsigned long)data;

	return (void *)((addr + 15) & ~15UL);
}
4907
4908 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
4909 struct RxDesc *desc)
4910 {
4911 void *data;
4912 dma_addr_t mapping;
4913 struct device *d = &tp->pci_dev->dev;
4914 struct net_device *dev = tp->dev;
4915 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
4916
4917 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
4918 if (!data)
4919 return NULL;
4920
4921 if (rtl8169_align(data) != data) {
4922 kfree(data);
4923 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
4924 if (!data)
4925 return NULL;
4926 }
4927
4928 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
4929 DMA_FROM_DEVICE);
4930 if (unlikely(dma_mapping_error(d, mapping))) {
4931 if (net_ratelimit())
4932 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
4933 goto err_out;
4934 }
4935
4936 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
4937 return data;
4938
4939 err_out:
4940 kfree(data);
4941 return NULL;
4942 }
4943
/* Tear down every populated rx buffer (unmap, free, poison descriptor). */
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (tp->Rx_databuff[i]) {
			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
					    tp->RxDescArray + i);
		}
	}
}
4955
/* Flag @desc as the final rx descriptor so the NIC wraps the ring. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
4960
/*
 * Populate every empty rx ring slot with a mapped buffer and close the
 * ring with RingEnd. On any allocation failure the whole ring is torn
 * down again and -ENOMEM is returned.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already populated (e.g. partial refill). */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
4986
/*
 * Reset ring indexes, clear the per-slot bookkeeping arrays and fill
 * the rx ring. Returns 0 or -ENOMEM from rtl8169_rx_fill().
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
4998
/*
 * Undo the DMA mapping of one tx slot and scrub both the hardware
 * descriptor and the bookkeeping length (tx_skb->skb is left to the
 * caller, which knows whether this slot carried the skb pointer).
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5011
/*
 * Unmap and drop @n consecutive tx slots beginning at ring index
 * @start (modulo ring size). Slots holding an skb account it as a
 * dropped packet and free it.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		/* len == 0 means the slot was never mapped. */
		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5035
/* Drop every pending tx packet and reset the tx ring bookkeeping. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
	netdev_reset_queue(tp->dev);
}
5042
/*
 * Workqueue handler for RTL_FLAG_TASK_RESET_PENDING: quiesce NAPI and
 * the tx queue, reset the chip, recycle the rings and restart.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for in-flight xmit/irq paths to finish before resetting. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Rx buffers stay allocated; just hand them back to the NIC. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5065
/* ndo_tx_timeout handler: defer a full chip reset to the workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5072
/*
 * Map and queue the paged fragments of @skb after the head slot
 * (tp->cur_tx). The last fragment gets LastFrag and owns the skb
 * pointer so rtl_tx() can free it on completion.
 *
 * Returns the number of fragments queued, or -EIO after unwinding the
 * already-mapped fragments (the caller still owns the skb and the head
 * mapping).
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		/* RingEnd is set only on the ring's final descriptor. */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The final fragment carries the skb and closes the frame. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5123
/*
 * Fill the TSO / checksum-offload bits of the tx descriptor option
 * words. The per-txd_version info table says which opts word (and
 * which bits) this chip generation uses.
 *
 * NOTE(review): the CHECKSUM_PARTIAL branch reads ip_hdr(skb)
 * unconditionally - presumably IPv4-only offload on these chips;
 * confirm before routing IPv6 checksums through here.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
5145
/*
 * ndo_start_xmit handler: map the skb head into the next tx slot, let
 * rtl8169_xmit_frags() queue the paged fragments, then publish the
 * head descriptor (DescOwn last) and kick the chip with TxPoll.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	/* The head slot must already belong to the host. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	/* NOTE(review): opts[1] passes through cpu_to_le32 here and again
	 * when stored to txd->opts2 below - identity on little-endian;
	 * verify on big-endian hosts.
	 */
	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		/* Linear skb: single descriptor owns the skb pointer. */
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	/* Fragment descriptors must be visible before the head's DescOwn. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5245
/*
 * Handle a SYSErr (PCI error) event: log the PCI command/status pair,
 * rewrite the command/status registers to clear the error bits, drop
 * PCI DAC if the error hit at boot time, then schedule a chip reset.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI_STATUS error bits are write-one-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5292
/* Per-poll tx completion tally, folded into tx_stats by rtl_tx(). */
struct rtl_txc {
	int packets;
	int bytes;
};
5297
/*
 * Reap completed tx descriptors between dirty_tx and cur_tx: unmap
 * each finished slot, free skbs on LastFrag slots, publish the new
 * dirty_tx and wake the queue if it was stopped for lack of slots.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	struct rtl8169_stats *tx_stats = &tp->tx_stats;
	unsigned int dirty_tx, tx_left;
	struct rtl_txc txc = { 0, 0 };

	dirty_tx = tp->dirty_tx;
	/* Pairs with the write barriers in rtl8169_start_xmit(). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		/* Still owned by the NIC: stop reaping here. */
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			/* Only the frame's last slot carries the skb. */
			struct sk_buff *skb = tx_skb->skb;

			txc.packets++;
			txc.bytes += skb->len;
			dev_kfree_skb(skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets += txc.packets;
	tx_stats->bytes += txc.bytes;
	u64_stats_update_end(&tx_stats->syncp);

	netdev_completed_queue(dev, txc.packets, txc.bytes);

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5366
5367 static inline int rtl8169_fragmented_frame(u32 status)
5368 {
5369 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5370 }
5371
5372 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5373 {
5374 u32 status = opts1 & RxProtoMask;
5375
5376 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5377 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5378 skb->ip_summed = CHECKSUM_UNNECESSARY;
5379 else
5380 skb_checksum_none_assert(skb);
5381 }
5382
/*
 * Copy a received packet out of the (still mapped) rx buffer into a
 * freshly allocated skb. The sync_for_cpu/sync_for_device pair brackets
 * the CPU access so the buffer can be handed straight back to the NIC.
 * Returns NULL when skb allocation fails.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5401
/*
 * NAPI rx path: walk the rx ring from tp->cur_rx for at most @budget
 * descriptors, copy each completed frame into an skb and feed it to
 * GRO, then recycle the descriptor. Returns the number of descriptors
 * processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		/* Still owned by the NIC: nothing more to reap. */
		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* RXALL: deliver runts/bad-CRC frames anyway. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the trailing FCS unless RXFCS asks for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD platform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	tp->dirty_rx += count;

	return count;
}
5500
/*
 * Hard interrupt handler: on any event of interest, mask the chip's
 * interrupts and hand all processing to NAPI. 0xffff means the device
 * is gone (e.g. hot-unplugged or in a PCI error state).
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);
	if (status && status != 0xffff) {
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
5520
5521 /*
5522 * Workqueue context.
5523 */
5524 static void rtl_slow_event_work(struct rtl8169_private *tp)
5525 {
5526 struct net_device *dev = tp->dev;
5527 u16 status;
5528
5529 status = rtl_get_events(tp) & tp->event_slow;
5530 rtl_ack_events(tp, status);
5531
5532 if (unlikely(status & RxFIFOOver)) {
5533 switch (tp->mac_version) {
5534 /* Work around for rx fifo overflow */
5535 case RTL_GIGA_MAC_VER_11:
5536 netif_stop_queue(dev);
5537 /* XXX - Hack alert. See rtl_task(). */
5538 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
5539 default:
5540 break;
5541 }
5542 }
5543
5544 if (unlikely(status & SYSErr))
5545 rtl8169_pcierr_interrupt(dev);
5546
5547 if (status & LinkChg)
5548 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
5549
5550 napi_disable(&tp->napi);
5551 rtl_irq_disable(tp);
5552
5553 napi_enable(&tp->napi);
5554 napi_schedule(&tp->napi);
5555 }
5556
/*
 * Single workqueue entry point: under the work lock, run the handler
 * for each pending flag in rtl_work[] order (slow events first so a
 * reset queued by rtl_slow_event_work() runs in the same pass).
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	/* Bail out if the interface went down since the work was queued. */
	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
5590
5591 static int rtl8169_poll(struct napi_struct *napi, int budget)
5592 {
5593 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
5594 struct net_device *dev = tp->dev;
5595 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
5596 int work_done= 0;
5597 u16 status;
5598
5599 status = rtl_get_events(tp);
5600 rtl_ack_events(tp, status & ~tp->event_slow);
5601
5602 if (status & RTL_EVENT_NAPI_RX)
5603 work_done = rtl_rx(dev, tp, (u32) budget);
5604
5605 if (status & RTL_EVENT_NAPI_TX)
5606 rtl_tx(dev, tp);
5607
5608 if (status & tp->event_slow) {
5609 enable_mask &= ~tp->event_slow;
5610
5611 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
5612 }
5613
5614 if (work_done < budget) {
5615 napi_complete(napi);
5616
5617 rtl_irq_enable(tp, enable_mask);
5618 mmiowb();
5619 }
5620
5621 return work_done;
5622 }
5623
/*
 * Fold the chip's 24-bit RxMissed counter into the netdev stats and
 * reset it. Only the original 8169 chips (<= VER_06) expose it.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
5634
/*
 * Stop the interface: kill the timer, quiesce NAPI and the tx queue,
 * reset the chip, drain both rings and power the PLL down. Called
 * with the work lock held (see rtl8169_close()).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
5662
/*
 * ndo_stop handler: snapshot the hardware counters, bring the
 * interface down under the work lock, then release the irq and the
 * descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Keep the device awake while tearing it down. */
	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from doing any further work. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
5692
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole et al.: invoke the irq handler. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
5701
/*
 * ndo_open handler: allocate the descriptor rings, fill the rx ring,
 * load firmware, request the irq, then bring the PHY and hardware up
 * under the work lock. Errors unwind in strict reverse order.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
5781
/*
 * ndo_get_stats64 handler: fold the pending RxMissed count in, then
 * read the per-direction 64-bit counters under their seqcounts and
 * copy the plain netdev error counters across.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
5815
/*
 * Common quiesce path shared by system suspend, runtime suspend and
 * shutdown: stop the stack from using the device, park NAPI and the
 * deferred-work machinery, then power the PLL down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Nothing to quiesce if the interface is not up. */
	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	/* Hold the work mutex while NAPI is disabled and further task
	 * scheduling is turned off via RTL_FLAG_TASK_ENABLED, so no
	 * rtl_task instance can race against this teardown.
	 */
	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
5833
5834 #ifdef CONFIG_PM
5835
/* dev_pm_ops .suspend/.freeze/.poweroff hook: quiesce the NIC. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	rtl8169_net_suspend(pci_get_drvdata(pdev));

	return 0;
}
5845
/*
 * Bring a previously-running interface back after a (system or runtime)
 * resume: reattach it, power the PLL up, re-enable NAPI and the task
 * workqueue, then defer the actual hardware re-init to the reset task.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	/* Allow rtl_task() to run again (mirror of rtl8169_net_suspend). */
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* The reset task restores the chip state asynchronously. */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5861
/* dev_pm_ops .resume/.thaw/.restore hook. */
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Reprogram the PHY unconditionally after the power transition. */
	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
5875
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* No descriptor rings means the device is closed: idle already. */
	if (!tp->TxDescArray)
		return 0;

	/* Remember the user's WoL configuration and arm every wake source
	 * so link events can wake us from runtime suspend; the saved
	 * options are restored in rtl8169_runtime_resume().
	 */
	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
5894
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Device was closed when we went to sleep: nothing to restore. */
	if (!tp->TxDescArray)
		return 0;

	/* Undo the WAKE_ANY override set up in rtl8169_runtime_suspend(). */
	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
5915
5916 static int rtl8169_runtime_idle(struct device *device)
5917 {
5918 struct pci_dev *pdev = to_pci_dev(device);
5919 struct net_device *dev = pci_get_drvdata(pdev);
5920 struct rtl8169_private *tp = netdev_priv(dev);
5921
5922 return tp->TxDescArray ? -EBUSY : 0;
5923 }
5924
/* System sleep and runtime PM callbacks. suspend/freeze/poweroff and
 * resume/thaw/restore deliberately share one implementation each.
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend = rtl8169_suspend,
	.resume = rtl8169_resume,
	.freeze = rtl8169_suspend,
	.thaw = rtl8169_resume,
	.poweroff = rtl8169_suspend,
	.restore = rtl8169_resume,
	.runtime_suspend = rtl8169_runtime_suspend,
	.runtime_resume = rtl8169_runtime_resume,
	.runtime_idle = rtl8169_runtime_idle,
};
5936
5937 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
5938
5939 #else /* !CONFIG_PM */
5940
5941 #define RTL8169_PM_OPS NULL
5942
5943 #endif /* !CONFIG_PM */
5944
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		/* Stop the device mastering the bus before re-enabling rx. */
		pci_clear_master(tp->pci_dev);

		/* Leave only the receiver running so wake packets can
		 * still reach the WoL logic (see comment above).
		 */
		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
5964
/*
 * pci_driver .shutdown handler: quiesce the NIC and, on power-off,
 * arm wake-on-LAN (if configured) and drop the device into D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	/* Make sure the device is not runtime-suspended underneath us. */
	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	/* Balance the get above; no resume wanted while shutting down. */
	pm_runtime_put_noidle(d);
}
5992
/* pci_driver .remove handler: tear down everything rtl_init_one set up. */
static void __devexit rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Pairs with rtl8168_driver_start() at probe time for these chips. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	/* Stop any in-flight rtl_task before the netdev goes away. */
	cancel_work_sync(&tp->wk.work);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Balance the pm_runtime_put_noidle() done at probe time. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
6020
/* Net device operations shared by every supported chip variant. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6039
/* Per-family configuration, indexed by the RTL_CFG_* value carried in
 * the PCI id table's driver_data.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);	/* family hw init routine */
	unsigned int region;	/* PCI BAR holding the chip registers */
	unsigned int align;	/* rx buffer alignment */
	u16 event_slow;		/* irq events handled on the slow path */
	unsigned features;	/* RTL_FEATURE_* capability mask */
	u8 default_ver;		/* fallback passed to rtl8169_get_mac_version() */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6074
6075 /* Cfg9346_Unlock assumed. */
6076 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6077 const struct rtl_cfg_info *cfg)
6078 {
6079 void __iomem *ioaddr = tp->mmio_addr;
6080 unsigned msi = 0;
6081 u8 cfg2;
6082
6083 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6084 if (cfg->features & RTL_FEATURE_MSI) {
6085 if (pci_enable_msi(tp->pci_dev)) {
6086 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6087 } else {
6088 cfg2 |= MSIEnable;
6089 msi = RTL_FEATURE_MSI;
6090 }
6091 }
6092 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6093 RTL_W8(Config2, cfg2);
6094 return msi;
6095 }
6096
/*
 * pci_driver .probe handler: enable and map the device, identify the
 * chip variant, read the MAC address, wire up per-chip operations and
 * register the net_device.  The err_out_* labels unwind in strict
 * reverse order of the setup steps.
 */
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* Glue for the generic MII library (ethtool/ioctl paths). */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	/* Memory-Write-Invalidate is only an optimization: carry on without. */
	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	tp->cp_cmd = RxChkSum;

	/* Prefer 64-bit DMA when available and requested via use_dac;
	 * otherwise fall back to a 32-bit mask.
	 */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_free_res_3;
		}
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	rtl_init_rxcfg(tp);

	/* Quiesce the chip before bus mastering is enabled below. */
	rtl_irq_disable(tp);

	rtl_hw_reset(tp);

	rtl_ack_events(tp, 0xffff);

	pci_set_master(pdev);

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		tp->cp_cmd |= RxVlan;

	/* Select the per-chip-version helper implementations. */
	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);
	rtl_init_jumbo_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/* Probe WoL capabilities and attempt MSI while the config
	 * registers are unlocked.
	 */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(tp, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* TBI (fiber) and XMII (copper) attachments use different helpers. */
	if (rtl_tbi_enabled(tp)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	mutex_init(&tp->wk.mutex);

	/* Get MAC address */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	tp->hw_start = cfg->hw_start;
	tp->event_slow = cfg->event_slow;

	/* The opts1 rx overflow bits are masked off on everything except
	 * RTL_GIGA_MAC_VER_01.
	 */
	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
		~(RxBOVF | RxFOVF) : ~0;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_msi_4;

	pci_set_drvdata(pdev, dev);

	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
			   "tx checksumming: %s]\n",
			   rtl_chip_infos[chipset].jumbo_max,
			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	}

	/* Pairs with rtl8168_driver_stop() in rtl_remove_one(). */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_start(tp);
	}

	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);

	/* Drop the runtime PM reference for wake-capable devices;
	 * re-taken in rtl_remove_one().
	 */
	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

err_out_msi_4:
	rtl_disable_msi(pdev, tp);
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
6339
/* PCI driver glue; .driver.pm wires in the CONFIG_PM callbacks above. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= __devexit_p(rtl_remove_one),
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};
6348
/* Module entry point: register the PCI driver. */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
6353
/* Module exit point: unregister the PCI driver. */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}
6358
/* Hook module load/unload to the registration helpers above. */
module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);
/* (web-viewer extraction residue, not part of the driver source:
 * "This page took 0.156041 seconds and 4 git commands to generate.")
 */