r8169: add device specific CSI access helpers.
[deliverable/linux.git] / drivers / net / ethernet / realtek / r8169.c
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47
48 #ifdef RTL8169_DEBUG
49 #define assert(expr) \
50 if (!(expr)) { \
51 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
52 #expr,__FILE__,__func__,__LINE__); \
53 }
54 #define dprintk(fmt, args...) \
55 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
56 #else
57 #define assert(expr) do {} while (0)
58 #define dprintk(fmt, args...) do {} while (0)
59 #endif /* RTL8169_DEBUG */
60
61 #define R8169_MSG_DEFAULT \
62 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
63
64 #define TX_BUFFS_AVAIL(tp) \
65 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
66
67 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
68 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
69 static const int multicast_filter_limit = 32;
70
71 #define MAX_READ_REQUEST_SHIFT 12
72 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
73 #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
74 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
75
76 #define R8169_REGS_SIZE 256
77 #define R8169_NAPI_WEIGHT 64
78 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
79 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
80 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
81 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
82 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
83
84 #define RTL8169_TX_TIMEOUT (6*HZ)
85 #define RTL8169_PHY_TIMEOUT (10*HZ)
86
87 #define RTL_EEPROM_SIG cpu_to_le32(0x8129)
88 #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
89 #define RTL_EEPROM_SIG_ADDR 0x0000
90
91 /* write/read MMIO register */
92 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
93 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
94 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
95 #define RTL_R8(reg) readb (ioaddr + (reg))
96 #define RTL_R16(reg) readw (ioaddr + (reg))
97 #define RTL_R32(reg) readl (ioaddr + (reg))
98
99 enum mac_version {
100 RTL_GIGA_MAC_VER_01 = 0,
101 RTL_GIGA_MAC_VER_02,
102 RTL_GIGA_MAC_VER_03,
103 RTL_GIGA_MAC_VER_04,
104 RTL_GIGA_MAC_VER_05,
105 RTL_GIGA_MAC_VER_06,
106 RTL_GIGA_MAC_VER_07,
107 RTL_GIGA_MAC_VER_08,
108 RTL_GIGA_MAC_VER_09,
109 RTL_GIGA_MAC_VER_10,
110 RTL_GIGA_MAC_VER_11,
111 RTL_GIGA_MAC_VER_12,
112 RTL_GIGA_MAC_VER_13,
113 RTL_GIGA_MAC_VER_14,
114 RTL_GIGA_MAC_VER_15,
115 RTL_GIGA_MAC_VER_16,
116 RTL_GIGA_MAC_VER_17,
117 RTL_GIGA_MAC_VER_18,
118 RTL_GIGA_MAC_VER_19,
119 RTL_GIGA_MAC_VER_20,
120 RTL_GIGA_MAC_VER_21,
121 RTL_GIGA_MAC_VER_22,
122 RTL_GIGA_MAC_VER_23,
123 RTL_GIGA_MAC_VER_24,
124 RTL_GIGA_MAC_VER_25,
125 RTL_GIGA_MAC_VER_26,
126 RTL_GIGA_MAC_VER_27,
127 RTL_GIGA_MAC_VER_28,
128 RTL_GIGA_MAC_VER_29,
129 RTL_GIGA_MAC_VER_30,
130 RTL_GIGA_MAC_VER_31,
131 RTL_GIGA_MAC_VER_32,
132 RTL_GIGA_MAC_VER_33,
133 RTL_GIGA_MAC_VER_34,
134 RTL_GIGA_MAC_VER_35,
135 RTL_GIGA_MAC_VER_36,
136 RTL_GIGA_MAC_NONE = 0xff,
137 };
138
139 enum rtl_tx_desc_version {
140 RTL_TD_0 = 0,
141 RTL_TD_1 = 1,
142 };
143
144 #define JUMBO_1K ETH_DATA_LEN
145 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
146 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
147 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
148 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
149
150 #define _R(NAME,TD,FW,SZ,B) { \
151 .name = NAME, \
152 .txd_version = TD, \
153 .fw_name = FW, \
154 .jumbo_max = SZ, \
155 .jumbo_tx_csum = B \
156 }
157
158 static const struct {
159 const char *name;
160 enum rtl_tx_desc_version txd_version;
161 const char *fw_name;
162 u16 jumbo_max;
163 bool jumbo_tx_csum;
164 } rtl_chip_infos[] = {
165 /* PCI devices. */
166 [RTL_GIGA_MAC_VER_01] =
167 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
168 [RTL_GIGA_MAC_VER_02] =
169 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
170 [RTL_GIGA_MAC_VER_03] =
171 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
172 [RTL_GIGA_MAC_VER_04] =
173 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
174 [RTL_GIGA_MAC_VER_05] =
175 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
176 [RTL_GIGA_MAC_VER_06] =
177 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
178 /* PCI-E devices. */
179 [RTL_GIGA_MAC_VER_07] =
180 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
181 [RTL_GIGA_MAC_VER_08] =
182 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
183 [RTL_GIGA_MAC_VER_09] =
184 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
185 [RTL_GIGA_MAC_VER_10] =
186 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
187 [RTL_GIGA_MAC_VER_11] =
188 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
189 [RTL_GIGA_MAC_VER_12] =
190 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
191 [RTL_GIGA_MAC_VER_13] =
192 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
193 [RTL_GIGA_MAC_VER_14] =
194 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
195 [RTL_GIGA_MAC_VER_15] =
196 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
197 [RTL_GIGA_MAC_VER_16] =
198 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
199 [RTL_GIGA_MAC_VER_17] =
200 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
201 [RTL_GIGA_MAC_VER_18] =
202 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
203 [RTL_GIGA_MAC_VER_19] =
204 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
205 [RTL_GIGA_MAC_VER_20] =
206 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
207 [RTL_GIGA_MAC_VER_21] =
208 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
209 [RTL_GIGA_MAC_VER_22] =
210 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
211 [RTL_GIGA_MAC_VER_23] =
212 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
213 [RTL_GIGA_MAC_VER_24] =
214 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
215 [RTL_GIGA_MAC_VER_25] =
216 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
217 JUMBO_9K, false),
218 [RTL_GIGA_MAC_VER_26] =
219 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
220 JUMBO_9K, false),
221 [RTL_GIGA_MAC_VER_27] =
222 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
223 [RTL_GIGA_MAC_VER_28] =
224 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
225 [RTL_GIGA_MAC_VER_29] =
226 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
227 JUMBO_1K, true),
228 [RTL_GIGA_MAC_VER_30] =
229 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
230 JUMBO_1K, true),
231 [RTL_GIGA_MAC_VER_31] =
232 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
233 [RTL_GIGA_MAC_VER_32] =
234 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
235 JUMBO_9K, false),
236 [RTL_GIGA_MAC_VER_33] =
237 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
238 JUMBO_9K, false),
239 [RTL_GIGA_MAC_VER_34] =
240 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
241 JUMBO_9K, false),
242 [RTL_GIGA_MAC_VER_35] =
243 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
244 JUMBO_9K, false),
245 [RTL_GIGA_MAC_VER_36] =
246 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
247 JUMBO_9K, false),
248 };
249 #undef _R
250
251 enum cfg_version {
252 RTL_CFG_0 = 0x00,
253 RTL_CFG_1,
254 RTL_CFG_2
255 };
256
257 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
258 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
259 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
260 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
261 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
262 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
263 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
264 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
265 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
266 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
267 { PCI_VENDOR_ID_LINKSYS, 0x1032,
268 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
269 { 0x0001, 0x8168,
270 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
271 {0,},
272 };
273
274 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
275
276 static int rx_buf_sz = 16383;
277 static int use_dac;
278 static struct {
279 u32 msg_enable;
280 } debug = { -1 };
281
282 enum rtl_registers {
283 MAC0 = 0, /* Ethernet hardware address. */
284 MAC4 = 4,
285 MAR0 = 8, /* Multicast filter. */
286 CounterAddrLow = 0x10,
287 CounterAddrHigh = 0x14,
288 TxDescStartAddrLow = 0x20,
289 TxDescStartAddrHigh = 0x24,
290 TxHDescStartAddrLow = 0x28,
291 TxHDescStartAddrHigh = 0x2c,
292 FLASH = 0x30,
293 ERSR = 0x36,
294 ChipCmd = 0x37,
295 TxPoll = 0x38,
296 IntrMask = 0x3c,
297 IntrStatus = 0x3e,
298
299 TxConfig = 0x40,
300 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
301 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
302
303 RxConfig = 0x44,
304 #define RX128_INT_EN (1 << 15) /* 8111c and later */
305 #define RX_MULTI_EN (1 << 14) /* 8111c only */
306 #define RXCFG_FIFO_SHIFT 13
307 /* No threshold before first PCI xfer */
308 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
309 #define RXCFG_DMA_SHIFT 8
310 /* Unlimited maximum PCI burst. */
311 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
312
313 RxMissed = 0x4c,
314 Cfg9346 = 0x50,
315 Config0 = 0x51,
316 Config1 = 0x52,
317 Config2 = 0x53,
318 #define PME_SIGNAL (1 << 5) /* 8168c and later */
319
320 Config3 = 0x54,
321 Config4 = 0x55,
322 Config5 = 0x56,
323 MultiIntr = 0x5c,
324 PHYAR = 0x60,
325 PHYstatus = 0x6c,
326 RxMaxSize = 0xda,
327 CPlusCmd = 0xe0,
328 IntrMitigate = 0xe2,
329 RxDescAddrLow = 0xe4,
330 RxDescAddrHigh = 0xe8,
331 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
332
333 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
334
335 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
336
337 #define TxPacketMax (8064 >> 7)
338 #define EarlySize 0x27
339
340 FuncEvent = 0xf0,
341 FuncEventMask = 0xf4,
342 FuncPresetState = 0xf8,
343 FuncForceEvent = 0xfc,
344 };
345
346 enum rtl8110_registers {
347 TBICSR = 0x64,
348 TBI_ANAR = 0x68,
349 TBI_LPAR = 0x6a,
350 };
351
352 enum rtl8168_8101_registers {
353 CSIDR = 0x64,
354 CSIAR = 0x68,
355 #define CSIAR_FLAG 0x80000000
356 #define CSIAR_WRITE_CMD 0x80000000
357 #define CSIAR_BYTE_ENABLE 0x0f
358 #define CSIAR_BYTE_ENABLE_SHIFT 12
359 #define CSIAR_ADDR_MASK 0x0fff
360 PMCH = 0x6f,
361 EPHYAR = 0x80,
362 #define EPHYAR_FLAG 0x80000000
363 #define EPHYAR_WRITE_CMD 0x80000000
364 #define EPHYAR_REG_MASK 0x1f
365 #define EPHYAR_REG_SHIFT 16
366 #define EPHYAR_DATA_MASK 0xffff
367 DLLPR = 0xd0,
368 #define PFM_EN (1 << 6)
369 DBG_REG = 0xd1,
370 #define FIX_NAK_1 (1 << 4)
371 #define FIX_NAK_2 (1 << 3)
372 TWSI = 0xd2,
373 MCU = 0xd3,
374 #define NOW_IS_OOB (1 << 7)
375 #define EN_NDP (1 << 3)
376 #define EN_OOB_RESET (1 << 2)
377 EFUSEAR = 0xdc,
378 #define EFUSEAR_FLAG 0x80000000
379 #define EFUSEAR_WRITE_CMD 0x80000000
380 #define EFUSEAR_READ_CMD 0x00000000
381 #define EFUSEAR_REG_MASK 0x03ff
382 #define EFUSEAR_REG_SHIFT 8
383 #define EFUSEAR_DATA_MASK 0xff
384 };
385
386 enum rtl8168_registers {
387 LED_FREQ = 0x1a,
388 EEE_LED = 0x1b,
389 ERIDR = 0x70,
390 ERIAR = 0x74,
391 #define ERIAR_FLAG 0x80000000
392 #define ERIAR_WRITE_CMD 0x80000000
393 #define ERIAR_READ_CMD 0x00000000
394 #define ERIAR_ADDR_BYTE_ALIGN 4
395 #define ERIAR_TYPE_SHIFT 16
396 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
397 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
398 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
399 #define ERIAR_MASK_SHIFT 12
400 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
401 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
402 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
403 EPHY_RXER_NUM = 0x7c,
404 OCPDR = 0xb0, /* OCP GPHY access */
405 #define OCPDR_WRITE_CMD 0x80000000
406 #define OCPDR_READ_CMD 0x00000000
407 #define OCPDR_REG_MASK 0x7f
408 #define OCPDR_GPHY_REG_SHIFT 16
409 #define OCPDR_DATA_MASK 0xffff
410 OCPAR = 0xb4,
411 #define OCPAR_FLAG 0x80000000
412 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
413 #define OCPAR_GPHY_READ_CMD 0x0000f060
414 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
415 MISC = 0xf0, /* 8168e only. */
416 #define TXPLA_RST (1 << 29)
417 #define PWM_EN (1 << 22)
418 };
419
420 enum rtl_register_content {
421 /* InterruptStatusBits */
422 SYSErr = 0x8000,
423 PCSTimeout = 0x4000,
424 SWInt = 0x0100,
425 TxDescUnavail = 0x0080,
426 RxFIFOOver = 0x0040,
427 LinkChg = 0x0020,
428 RxOverflow = 0x0010,
429 TxErr = 0x0008,
430 TxOK = 0x0004,
431 RxErr = 0x0002,
432 RxOK = 0x0001,
433
434 /* RxStatusDesc */
435 RxBOVF = (1 << 24),
436 RxFOVF = (1 << 23),
437 RxRWT = (1 << 22),
438 RxRES = (1 << 21),
439 RxRUNT = (1 << 20),
440 RxCRC = (1 << 19),
441
442 /* ChipCmdBits */
443 StopReq = 0x80,
444 CmdReset = 0x10,
445 CmdRxEnb = 0x08,
446 CmdTxEnb = 0x04,
447 RxBufEmpty = 0x01,
448
449 /* TXPoll register p.5 */
450 HPQ = 0x80, /* Poll cmd on the high prio queue */
451 NPQ = 0x40, /* Poll cmd on the low prio queue */
452 FSWInt = 0x01, /* Forced software interrupt */
453
454 /* Cfg9346Bits */
455 Cfg9346_Lock = 0x00,
456 Cfg9346_Unlock = 0xc0,
457
458 /* rx_mode_bits */
459 AcceptErr = 0x20,
460 AcceptRunt = 0x10,
461 AcceptBroadcast = 0x08,
462 AcceptMulticast = 0x04,
463 AcceptMyPhys = 0x02,
464 AcceptAllPhys = 0x01,
465 #define RX_CONFIG_ACCEPT_MASK 0x3f
466
467 /* TxConfigBits */
468 TxInterFrameGapShift = 24,
469 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
470
471 /* Config1 register p.24 */
472 LEDS1 = (1 << 7),
473 LEDS0 = (1 << 6),
474 Speed_down = (1 << 4),
475 MEMMAP = (1 << 3),
476 IOMAP = (1 << 2),
477 VPD = (1 << 1),
478 PMEnable = (1 << 0), /* Power Management Enable */
479
480 /* Config2 register p. 25 */
481 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
482 PCI_Clock_66MHz = 0x01,
483 PCI_Clock_33MHz = 0x00,
484
485 /* Config3 register p.25 */
486 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
487 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
488 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
489 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
490
491 /* Config4 register */
492 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
493
494 /* Config5 register p.27 */
495 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
496 MWF = (1 << 5), /* Accept Multicast wakeup frame */
497 UWF = (1 << 4), /* Accept Unicast wakeup frame */
498 Spi_en = (1 << 3),
499 LanWake = (1 << 1), /* LanWake enable/disable */
500 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
501
502 /* TBICSR p.28 */
503 TBIReset = 0x80000000,
504 TBILoopback = 0x40000000,
505 TBINwEnable = 0x20000000,
506 TBINwRestart = 0x10000000,
507 TBILinkOk = 0x02000000,
508 TBINwComplete = 0x01000000,
509
510 /* CPlusCmd p.31 */
511 EnableBist = (1 << 15), // 8168 8101
512 Mac_dbgo_oe = (1 << 14), // 8168 8101
513 Normal_mode = (1 << 13), // unused
514 Force_half_dup = (1 << 12), // 8168 8101
515 Force_rxflow_en = (1 << 11), // 8168 8101
516 Force_txflow_en = (1 << 10), // 8168 8101
517 Cxpl_dbg_sel = (1 << 9), // 8168 8101
518 ASF = (1 << 8), // 8168 8101
519 PktCntrDisable = (1 << 7), // 8168 8101
520 Mac_dbgo_sel = 0x001c, // 8168
521 RxVlan = (1 << 6),
522 RxChkSum = (1 << 5),
523 PCIDAC = (1 << 4),
524 PCIMulRW = (1 << 3),
525 INTT_0 = 0x0000, // 8168
526 INTT_1 = 0x0001, // 8168
527 INTT_2 = 0x0002, // 8168
528 INTT_3 = 0x0003, // 8168
529
530 /* rtl8169_PHYstatus */
531 TBI_Enable = 0x80,
532 TxFlowCtrl = 0x40,
533 RxFlowCtrl = 0x20,
534 _1000bpsF = 0x10,
535 _100bps = 0x08,
536 _10bps = 0x04,
537 LinkStatus = 0x02,
538 FullDup = 0x01,
539
540 /* _TBICSRBit */
541 TBILinkOK = 0x02000000,
542
543 /* DumpCounterCommand */
544 CounterDump = 0x8,
545 };
546
547 enum rtl_desc_bit {
548 /* First doubleword. */
549 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
550 RingEnd = (1 << 30), /* End of descriptor ring */
551 FirstFrag = (1 << 29), /* First segment of a packet */
552 LastFrag = (1 << 28), /* Final segment of a packet */
553 };
554
555 /* Generic case. */
556 enum rtl_tx_desc_bit {
557 /* First doubleword. */
558 TD_LSO = (1 << 27), /* Large Send Offload */
559 #define TD_MSS_MAX 0x07ffu /* MSS value */
560
561 /* Second doubleword. */
562 TxVlanTag = (1 << 17), /* Add VLAN tag */
563 };
564
565 /* 8169, 8168b and 810x except 8102e. */
566 enum rtl_tx_desc_bit_0 {
567 /* First doubleword. */
568 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
569 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
570 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
571 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
572 };
573
574 /* 8102e, 8168c and beyond. */
575 enum rtl_tx_desc_bit_1 {
576 /* Second doubleword. */
577 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
578 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
579 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
580 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
581 };
582
583 static const struct rtl_tx_desc_info {
584 struct {
585 u32 udp;
586 u32 tcp;
587 } checksum;
588 u16 mss_shift;
589 u16 opts_offset;
590 } tx_desc_info [] = {
591 [RTL_TD_0] = {
592 .checksum = {
593 .udp = TD0_IP_CS | TD0_UDP_CS,
594 .tcp = TD0_IP_CS | TD0_TCP_CS
595 },
596 .mss_shift = TD0_MSS_SHIFT,
597 .opts_offset = 0
598 },
599 [RTL_TD_1] = {
600 .checksum = {
601 .udp = TD1_IP_CS | TD1_UDP_CS,
602 .tcp = TD1_IP_CS | TD1_TCP_CS
603 },
604 .mss_shift = TD1_MSS_SHIFT,
605 .opts_offset = 1
606 }
607 };
608
609 enum rtl_rx_desc_bit {
610 /* Rx private */
611 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
612 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
613
614 #define RxProtoUDP (PID1)
615 #define RxProtoTCP (PID0)
616 #define RxProtoIP (PID1 | PID0)
617 #define RxProtoMask RxProtoIP
618
619 IPFail = (1 << 16), /* IP checksum failed */
620 UDPFail = (1 << 15), /* UDP/IP checksum failed */
621 TCPFail = (1 << 14), /* TCP/IP checksum failed */
622 RxVlanTag = (1 << 16), /* VLAN tag available */
623 };
624
625 #define RsvdMask 0x3fffc000
626
/* Hardware Tx descriptor: opts1 carries DescOwn/frag/checksum bits,
 * opts2 the VLAN tag, addr the 64-bit DMA address of the buffer. */
struct TxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

/* Hardware Rx descriptor, same layout as TxDesc (status in opts1). */
struct RxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

/* Per-Tx-slot bookkeeping: the skb mapped into the slot and its length. */
struct ring_info {
	struct sk_buff *skb;
	u32 len;
	u8 __pad[sizeof(void *) - sizeof(u32)];	/* keep entries pointer-aligned */
};
644
/* Optional capabilities recorded in tp->features at probe time. */
enum features {
	RTL_FEATURE_WOL = (1 << 0),	/* Wake-on-LAN supported/enabled */
	RTL_FEATURE_MSI = (1 << 1),	/* MSI interrupt in use */
	RTL_FEATURE_GMII = (1 << 2),	/* GMII (gigabit) PHY interface */
};
650
/*
 * Hardware statistics dump area. The NIC DMAs this exact little-endian
 * layout to the address programmed in CounterAddrLow/High, so field
 * order and widths must not change.
 */
struct rtl8169_counters {
	__le64 tx_packets;
	__le64 rx_packets;
	__le64 tx_errors;
	__le32 rx_errors;
	__le16 rx_missed;
	__le16 align_errors;
	__le32 tx_one_collision;
	__le32 tx_multi_collision;
	__le64 rx_unicast;
	__le64 rx_broadcast;
	__le32 rx_multicast;
	__le16 tx_aborted;
	__le16 tx_underun;	/* sic - name kept for hardware-layout stability */
};
666
/* Bit indices into tp->wk.flags driving the deferred work handler. */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,		/* work may run at all */
	RTL_FLAG_TASK_SLOW_PENDING,	/* slow-event (link/err) servicing queued */
	RTL_FLAG_TASK_RESET_PENDING,	/* full chip reset queued */
	RTL_FLAG_TASK_PHY_PENDING,	/* PHY maintenance queued */
	RTL_FLAG_MAX
};
674
/* Per-direction software counters, read consistently via u64_stats_sync. */
struct rtl8169_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
680
681 struct rtl8169_private {
682 void __iomem *mmio_addr; /* memory map physical address */
683 struct pci_dev *pci_dev;
684 struct net_device *dev;
685 struct napi_struct napi;
686 u32 msg_enable;
687 u16 txd_version;
688 u16 mac_version;
689 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
690 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
691 u32 dirty_rx;
692 u32 dirty_tx;
693 struct rtl8169_stats rx_stats;
694 struct rtl8169_stats tx_stats;
695 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
696 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
697 dma_addr_t TxPhyAddr;
698 dma_addr_t RxPhyAddr;
699 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
700 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
701 struct timer_list timer;
702 u16 cp_cmd;
703
704 u16 event_slow;
705
706 struct mdio_ops {
707 void (*write)(void __iomem *, int, int);
708 int (*read)(void __iomem *, int);
709 } mdio_ops;
710
711 struct pll_power_ops {
712 void (*down)(struct rtl8169_private *);
713 void (*up)(struct rtl8169_private *);
714 } pll_power_ops;
715
716 struct jumbo_ops {
717 void (*enable)(struct rtl8169_private *);
718 void (*disable)(struct rtl8169_private *);
719 } jumbo_ops;
720
721 struct csi_ops {
722 void (*write)(void __iomem *, int, int);
723 u32 (*read)(void __iomem *, int);
724 } csi_ops;
725
726 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
727 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
728 void (*phy_reset_enable)(struct rtl8169_private *tp);
729 void (*hw_start)(struct net_device *);
730 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
731 unsigned int (*link_ok)(void __iomem *);
732 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
733
734 struct {
735 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
736 struct mutex mutex;
737 struct work_struct work;
738 } wk;
739
740 unsigned features;
741
742 struct mii_if_info mii;
743 struct rtl8169_counters counters;
744 u32 saved_wolopts;
745 u32 opts1_mask;
746
747 struct rtl_fw {
748 const struct firmware *fw;
749
750 #define RTL_VER_SIZE 32
751
752 char version[RTL_VER_SIZE];
753
754 struct rtl_fw_phy_action {
755 __le32 *code;
756 size_t size;
757 } phy_action;
758 } *rtl_fw;
759 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
760 };
761
762 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
763 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
764 module_param(use_dac, int, 0);
765 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
766 module_param_named(debug, debug.msg_enable, int, 0);
767 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
768 MODULE_LICENSE("GPL");
769 MODULE_VERSION(RTL8169_VERSION);
770 MODULE_FIRMWARE(FIRMWARE_8168D_1);
771 MODULE_FIRMWARE(FIRMWARE_8168D_2);
772 MODULE_FIRMWARE(FIRMWARE_8168E_1);
773 MODULE_FIRMWARE(FIRMWARE_8168E_2);
774 MODULE_FIRMWARE(FIRMWARE_8168E_3);
775 MODULE_FIRMWARE(FIRMWARE_8105E_1);
776 MODULE_FIRMWARE(FIRMWARE_8168F_1);
777 MODULE_FIRMWARE(FIRMWARE_8168F_2);
778
/* Serialize the deferred-work handler against configuration paths. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
783
/* Counterpart of rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
788
789 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
790 {
791 int cap = pci_pcie_cap(pdev);
792
793 if (cap) {
794 u16 ctl;
795
796 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
797 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
798 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
799 }
800 }
801
/*
 * Read an OCP register (OOB/DASH firmware interface on 8168dp-class chips).
 * @mask: nibble of byte enables placed in OCPAR bits 15..12.
 * @reg: 12-bit OCP register address.
 * Polls OCPAR_FLAG for up to 2 ms, then returns OCPDR; a timeout is not
 * reported - the caller gets whatever OCPDR holds at that point.
 */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		/* Hardware sets OCPAR_FLAG when the read data is valid. */
		if (RTL_R32(OCPAR) & OCPAR_FLAG)
			break;
	}
	return RTL_R32(OCPDR);
}
815
/*
 * Write @data to an OCP register. Mirrors ocp_read(): data goes to OCPDR
 * first, then the address/enable word (with OCPAR_FLAG as the write
 * command) is posted and completion is polled for up to 2 ms.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		/* OCPAR_FLAG clears once the write has been accepted. */
		if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
			break;
	}
}
829
/*
 * Notify the OOB (DASH) management firmware of a driver state change.
 * The command byte is pushed through the ERI interface, then the
 * firmware doorbell is rung via OCP register 0x30.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	RTL_W8(ERIDR, cmd);
	/* Magic ERI command/address constant - vendor-provided, undocumented. */
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);
	for (i = 0; i < 5; i++) {
		udelay(100);
		if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
			break;
	}

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
846
847 #define OOB_CMD_RESET 0x00
848 #define OOB_CMD_DRIVER_START 0x05
849 #define OOB_CMD_DRIVER_STOP 0x06
850
851 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
852 {
853 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
854 }
855
/*
 * Tell the DASH firmware the driver is taking over, then wait up to
 * 100 ms for the acknowledge bit (bit 11 of the status OCP register).
 */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	u16 reg;
	int i;

	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	reg = rtl8168_get_ocp_reg(tp);

	for (i = 0; i < 10; i++) {
		msleep(10);
		if (ocp_read(tp, 0x0f, reg) & 0x00000800)
			break;
	}
}
871
/*
 * Tell the DASH firmware the driver is releasing the device, then wait
 * up to 100 ms for the acknowledge bit to clear again.
 */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	u16 reg;
	int i;

	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	reg = rtl8168_get_ocp_reg(tp);

	for (i = 0; i < 10; i++) {
		msleep(10);
		if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
			break;
	}
}
887
888 static int r8168dp_check_dash(struct rtl8169_private *tp)
889 {
890 u16 reg = rtl8168_get_ocp_reg(tp);
891
892 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
893 }
894
/*
 * Write @value to PHY register @reg_addr through the PHYAR MDIO window.
 * Bit 31 set = write command; completion is signalled by the chip
 * clearing bit 31. Polls for up to 500 us.
 */
static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	int i;

	RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));

	for (i = 20; i > 0; i--) {
		/*
		 * Check if the RTL8169 has completed writing to the specified
		 * MII register.
		 */
		if (!(RTL_R32(PHYAR) & 0x80000000))
			break;
		udelay(25);
	}
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
916
/*
 * Read PHY register @reg_addr through the PHYAR MDIO window.
 * Bit 31 clear = read command; the chip sets bit 31 when the 16-bit
 * result is available in the low half of PHYAR.
 * Returns the register value, or -1 on timeout (~500 us).
 */
static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int i, value = -1;

	RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);

	for (i = 20; i > 0; i--) {
		/*
		 * Check if the RTL8169 has completed retrieving data from
		 * the specified MII register.
		 */
		if (RTL_R32(PHYAR) & 0x80000000) {
			value = RTL_R32(PHYAR) & 0xffff;
			break;
		}
		udelay(25);
	}
	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
942
/*
 * Issue one GPHY OCP transaction on first-generation 8168dp: @data holds
 * the OCPDR command/payload bits, the PHY register number is merged in
 * here. Waits up to 100 ms for OCPAR_FLAG to clear.
 */
static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
{
	int i;

	RTL_W32(OCPDR, data |
		((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	/* NOTE(review): purpose of the EPHY_RXER_NUM = 0 write is not
	 * evident from this file - presumably required to kick the
	 * transaction; confirm against vendor code. */
	RTL_W32(EPHY_RXER_NUM, 0);

	for (i = 0; i < 100; i++) {
		mdelay(1);
		if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
			break;
	}
}
958
/* MDIO write for first-generation 8168dp, via the GPHY OCP interface. */
static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
		(value & OCPDR_DATA_MASK));
}
964
/*
 * MDIO read for first-generation 8168dp: post the read command, then
 * issue a second GPHY transaction to fetch the result and poll (up to
 * 100 ms) for OCPAR_FLAG before returning the 16-bit data from OCPDR.
 */
static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int i;

	r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	for (i = 0; i < 100; i++) {
		mdelay(1);
		if (RTL_R32(OCPAR) & OCPAR_FLAG)
			break;
	}

	return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
}
983
984 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
985
/* Clear bit 17 of reg 0xd0 - presumably grants the host MDIO access on
 * second-generation 8168dp; confirm against vendor documentation. */
static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
990
/* Set bit 17 of reg 0xd0 again - counterpart of r8168dp_2_mdio_start(). */
static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
995
/* MDIO write for second-generation 8168dp: bracket the plain 8169 MDIO
 * write with the start/stop access-grant sequence. */
static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(ioaddr, reg_addr, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1004
/* MDIO read for second-generation 8168dp; see r8168dp_2_mdio_write(). */
static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
{
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(ioaddr, reg_addr);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1017
/* Chip-agnostic PHY register write, dispatched via tp->mdio_ops. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp->mmio_addr, location, val);
}
1022
/* Chip-agnostic PHY register read, dispatched via tp->mdio_ops. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp->mmio_addr, location);
}
1027
/* OR @value into a PHY register (read-modify-write, set-bits only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
1032
/* Read-modify-write a PHY register: set the bits in @p, clear those in @m
 * ("w1w0" = write ones, write zeroes). */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (cur | p) & ~m);
}
1040
/* mii_if_info callback: write a PHY register (@phy_id is unused - the
 * chip only exposes its internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1048
/* mii_if_info callback: read a PHY register (@phy_id is unused). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1055
/*
 * Write @value to a PCIe PHY (EPHY) register via the EPHYAR window.
 * Completion is signalled by EPHYAR_FLAG clearing; polls up to 1 ms.
 */
static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
{
	unsigned int i;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
			break;
		udelay(10);
	}
}
1069
/*
 * Read a PCIe PHY (EPHY) register via the EPHYAR window. The chip sets
 * EPHYAR_FLAG when the data is valid; polls up to 1 ms and returns
 * 0xffff on timeout.
 */
static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
{
	u16 value = 0xffff;
	unsigned int i;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
			value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
			break;
		}
		udelay(10);
	}

	return value;
}
1087
/*
 * Write an ERI (Extended Register Interface) register.
 * @addr must be dword-aligned and @mask (ERIAR_MASK_*) selects the byte
 * lanes; @type is one of ERIAR_EXGMAC/MSIX/ASF. Polls completion
 * (ERIAR_FLAG clearing) for up to 10 ms; a timeout is silent.
 */
static
void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
{
	unsigned int i;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
			break;
		udelay(100);
	}
}
1103
/*
 * Read an ERI register (all four byte lanes). The chip sets ERIAR_FLAG
 * when ERIDR holds the result; polls up to 10 ms and returns ~0 on
 * timeout.
 */
static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
{
	u32 value = ~0x00;
	unsigned int i;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(ERIAR) & ERIAR_FLAG) {
			value = RTL_R32(ERIDR);
			break;
		}
		udelay(100);
	}

	return value;
}
1121
1122 static void
1123 rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
1124 {
1125 u32 val;
1126
1127 val = rtl_eri_read(ioaddr, addr, type);
1128 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
1129 }
1130
/* One batched EXGMAC (ERI) write: address, byte-enable mask and value. */
struct exgmac_reg {
	u16 addr;
	u16 mask;
	u32 val;
};
1136
1137 static void rtl_write_exgmac_batch(void __iomem *ioaddr,
1138 const struct exgmac_reg *r, int len)
1139 {
1140 while (len-- > 0) {
1141 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1142 r++;
1143 }
1144 }
1145
/*
 * Read one byte from the 8168d efuse at @reg_addr through the indirect
 * EFUSEAR window.  Polls up to 300 x 100us and returns 0xff if the chip
 * never flags the data as valid.
 */
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
	u8 value = 0xff;
	unsigned int i;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	for (i = 0; i < 300; i++) {
		/* Set flag means the efuse byte is ready. */
		if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
			value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
			break;
		}
		udelay(100);
	}

	return value;
}
1163
/* Return the current interrupt event bits (IntrStatus register). */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1170
/*
 * Acknowledge (clear) the interrupt events in @bits by writing them
 * back to IntrStatus.  mmiowb() orders the MMIO write before any
 * following unlock on other architectures.
 */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}
1178
/* Mask all chip interrupt sources (IntrMask = 0). */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1186
/* Unmask exactly the interrupt sources given in @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1193
/* Interrupt events handled from the NAPI poll path. */
#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)

/* Unmask the NAPI events plus the chip-specific slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1202
/*
 * Disable all interrupts and acknowledge any pending ones.  The final
 * ChipCmd read flushes the posted MMIO writes to the chip before we
 * return.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1211
/* Non-zero while a TBI (fiber) reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1218
/* Non-zero while the PHY still holds BMCR_RESET (self-clearing). */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1223
/* Non-zero when the TBI (fiber) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1228
/* Non-zero when the copper (xMII) link is up, per the PHYstatus register. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1233
/* Kick off a TBI (fiber) reset by setting TBIReset in TBICSR. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1240
1241 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1242 {
1243 unsigned int val;
1244
1245 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1246 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1247 }
1248
/*
 * Chip-specific EXGMAC fixups applied when the link state changes on
 * 8168E-VL (VER_34) and 8168F (VER_35/36).  The 0x1bc/0x1dc values are
 * opaque vendor magic selected by the negotiated link speed; VER_34
 * additionally toggles bit 0 of ERI 0xdc to reset the packet filter.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
	}
}
1294
/*
 * Propagate the hardware link state to the net core.  With @pm set,
 * link up cancels any scheduled runtime suspend and link down schedules
 * one 5s out.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		/* Rate-limit the "link up" message, which can be frequent. */
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1314
/* Link-state check without runtime-PM side effects. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1321
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)

/*
 * Translate the chip's Config1/3/5 Wake-on-LAN bits into WAKE_* flags.
 * Returns 0 when PME is disabled (Config1.PMEnable clear), regardless
 * of the individual wake bits.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1350
1351 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1352 {
1353 struct rtl8169_private *tp = netdev_priv(dev);
1354
1355 rtl_lock_work(tp);
1356
1357 wol->supported = WAKE_ANY;
1358 wol->wolopts = __rtl8169_get_wol(tp);
1359
1360 rtl_unlock_work(tp);
1361 }
1362
/*
 * Program the chip's Wake-on-LAN configuration from @wolopts.  The
 * Config registers are only writable between the Cfg9346 unlock/lock
 * pair.  Older chips (VER_01..17) gate PME through Config1.PMEnable,
 * newer ones through Config2.PME_SIGNAL.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	/* Mapping of each WAKE_* flag to its config register and bit. */
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1407
1408 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1409 {
1410 struct rtl8169_private *tp = netdev_priv(dev);
1411
1412 rtl_lock_work(tp);
1413
1414 if (wol->wolopts)
1415 tp->features |= RTL_FEATURE_WOL;
1416 else
1417 tp->features &= ~RTL_FEATURE_WOL;
1418 __rtl8169_set_wol(tp, wol->wolopts);
1419
1420 rtl_unlock_work(tp);
1421
1422 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1423
1424 return 0;
1425 }
1426
/* Firmware file name for this chip, or NULL-equivalent per chip table. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1431
/*
 * ethtool get_drvinfo: driver name/version, PCI bus id, and the loaded
 * firmware version when firmware has been successfully loaded.
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	/* Ensure the fw version string can never overflow the ethtool field. */
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1446
/* ethtool get_regs_len: size of the register dump (MMIO window). */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1451
1452 static int rtl8169_set_speed_tbi(struct net_device *dev,
1453 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1454 {
1455 struct rtl8169_private *tp = netdev_priv(dev);
1456 void __iomem *ioaddr = tp->mmio_addr;
1457 int ret = 0;
1458 u32 reg;
1459
1460 reg = RTL_R32(TBICSR);
1461 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1462 (duplex == DUPLEX_FULL)) {
1463 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1464 } else if (autoneg == AUTONEG_ENABLE)
1465 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1466 else {
1467 netif_warn(tp, link, dev,
1468 "incorrect speed setting refused in TBI mode\n");
1469 ret = -EOPNOTSUPP;
1470 }
1471
1472 return ret;
1473 }
1474
/*
 * Configure link parameters on the internal (xMII) PHY.  With autoneg
 * enabled, program the 10/100 advertisement (MII_ADVERTISE) and the
 * gigabit advertisement (MII_CTRL1000, GMII chips only) from @adv and
 * restart autonegotiation.  Otherwise force 10 or 100 Mbps via BMCR;
 * forced 1000 Mbps is not supported.  Returns 0 or -EINVAL.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard MII registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* Always advertise symmetric and asymmetric pause. */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		/* Vendor fixup for forced-100 mode on 8169s/8110s chips. */
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1553
1554 static int rtl8169_set_speed(struct net_device *dev,
1555 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1556 {
1557 struct rtl8169_private *tp = netdev_priv(dev);
1558 int ret;
1559
1560 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1561 if (ret < 0)
1562 goto out;
1563
1564 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1565 (advertising & ADVERTISED_1000baseT_Full)) {
1566 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1567 }
1568 out:
1569 return ret;
1570 }
1571
/*
 * ethtool set_settings: stop the PHY timer before reprogramming link
 * parameters under the work lock (rtl8169_set_speed may re-arm it).
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1586
1587 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1588 netdev_features_t features)
1589 {
1590 struct rtl8169_private *tp = netdev_priv(dev);
1591
1592 if (dev->mtu > TD_MSS_MAX)
1593 features &= ~NETIF_F_ALL_TSO;
1594
1595 if (dev->mtu > JUMBO_1K &&
1596 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1597 features &= ~NETIF_F_IP_CSUM;
1598
1599 return features;
1600 }
1601
1602 static void __rtl8169_set_features(struct net_device *dev,
1603 netdev_features_t features)
1604 {
1605 struct rtl8169_private *tp = netdev_priv(dev);
1606 netdev_features_t changed = features ^ dev->features;
1607 void __iomem *ioaddr = tp->mmio_addr;
1608
1609 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1610 return;
1611
1612 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1613 if (features & NETIF_F_RXCSUM)
1614 tp->cp_cmd |= RxChkSum;
1615 else
1616 tp->cp_cmd &= ~RxChkSum;
1617
1618 if (dev->features & NETIF_F_HW_VLAN_RX)
1619 tp->cp_cmd |= RxVlan;
1620 else
1621 tp->cp_cmd &= ~RxVlan;
1622
1623 RTL_W16(CPlusCmd, tp->cp_cmd);
1624 RTL_R16(CPlusCmd);
1625 }
1626 if (changed & NETIF_F_RXALL) {
1627 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1628 if (features & NETIF_F_RXALL)
1629 tmp |= (AcceptErr | AcceptRunt);
1630 RTL_W32(RxConfig, tmp);
1631 }
1632 }
1633
/* ndo_set_features: serialize feature changes with the work lock. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
1645
1646
1647 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1648 struct sk_buff *skb)
1649 {
1650 return (vlan_tx_tag_present(skb)) ?
1651 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1652 }
1653
/*
 * Extract a hardware-stripped VLAN tag from an RX descriptor into the
 * skb.  The descriptor stores the tag byte-swapped (matching the TX
 * side in rtl8169_tx_vlan_tag), and opts2 is cleared for reuse.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}
1663
/*
 * ethtool get_settings for TBI (fiber) mode: the link is always
 * reported as 1000/Full over internal fibre, with autoneg state taken
 * from TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1684
1685 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1686 {
1687 struct rtl8169_private *tp = netdev_priv(dev);
1688
1689 return mii_ethtool_gset(&tp->mii, cmd);
1690 }
1691
/* ethtool get_settings: dispatch to the TBI or xMII handler under lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1703
/*
 * ethtool get_regs: copy (at most R8169_REGS_SIZE bytes of) the MMIO
 * register window into the user buffer.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
1716
1717 static u32 rtl8169_get_msglevel(struct net_device *dev)
1718 {
1719 struct rtl8169_private *tp = netdev_priv(dev);
1720
1721 return tp->msg_enable;
1722 }
1723
1724 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1725 {
1726 struct rtl8169_private *tp = netdev_priv(dev);
1727
1728 tp->msg_enable = value;
1729 }
1730
/*
 * ethtool statistics names.  The order must match the data[] indices
 * filled in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1746
1747 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1748 {
1749 switch (sset) {
1750 case ETH_SS_STATS:
1751 return ARRAY_SIZE(rtl8169_gstrings);
1752 default:
1753 return -EOPNOTSUPP;
1754 }
1755 }
1756
/*
 * Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and snapshot them into tp->counters.  The dump is
 * triggered by writing the buffer address with CounterDump set and
 * completes when the chip clears that bit (polled up to 1000 x 10us).
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;
	int wait = 1000;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	while (wait--) {
		/* Chip clears CounterDump when the DMA dump is complete. */
		if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
			memcpy(&tp->counters, counters, sizeof(*counters));
			break;
		}
		udelay(10);
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1796
/*
 * ethtool get_ethtool_stats: refresh the hardware tally counters and
 * report them in the order declared by rtl8169_gstrings[].
 * (tx_underun is the counter struct's own spelling of "underrun".)
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1820
1821 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1822 {
1823 switch(stringset) {
1824 case ETH_SS_STATS:
1825 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1826 break;
1827 }
1828 }
1829
/* ethtool entry points for the driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
1846
/*
 * Identify the chip variant from the TxConfig register.  The table is
 * walked top-down and the first (mask, val) match wins, so the more
 * specific masks must stay ahead of the family catch-alls - do not
 * reorder entries.  Unknown chips fall back to @default_version.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168F family. */
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	/* The catch-all entry guarantees this loop terminates. */
	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
1950
/* Debug-only dump of the detected MAC version (no-op without RTL8169_DEBUG). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1955
/* One batched PHY write: register number and value. */
struct phy_reg {
	u16 reg;
	u16 val;
};
1960
1961 static void rtl_writephy_batch(struct rtl8169_private *tp,
1962 const struct phy_reg *regs, int len)
1963 {
1964 while (len-- > 0) {
1965 rtl_writephy(tp, regs->reg, regs->val);
1966 regs++;
1967 }
1968 }
1969
/*
 * Firmware PHY action opcodes: the top nibble of each 32 bit action
 * word selects the operation (see rtl_phy_write_fw), bits 27..16 carry
 * the register number / operand count and bits 15..0 the data.
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_READ_EFUSE		0x40000000
#define PHY_READ_MAC_BYTE	0x50000000
#define PHY_WRITE_MAC_BYTE	0x60000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
#define PHY_WRITE_ERI_WORD	0xf0000000

/* On-disk header of the "new style" firmware container (magic == 0). */
struct fw_info {
	u32	magic;
	char	version[RTL_VER_SIZE];
	__le32	fw_start;	/* byte offset of the opcode stream */
	__le32	fw_len;		/* number of opcodes */
	u8	chksum;
} __packed;

/* Size of one firmware opcode (an __le32). */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
1996
/*
 * Validate the firmware container and locate the opcode stream.
 * Two formats exist: images whose first dword (magic) is zero carry a
 * struct fw_info header (checksummed over the whole file, with an
 * embedded version string and start/len of the opcodes); anything else
 * is treated as a raw opcode stream named after the firmware file.
 * On success, rtl_fw->phy_action and rtl_fw->version are filled in.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* The byte sum of the whole image must be zero. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* fw_len counts opcodes, not bytes. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2047
/*
 * Sanity check every opcode of a firmware action stream before it is
 * run: backward jumps must stay inside the stream, forward skips must
 * not run past its end, and unimplemented/unknown opcodes are rejected.
 * Returns true if the stream is safe to execute.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump by regno must not precede index 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May skip two opcodes forward. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno opcodes forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2106
2107 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2108 {
2109 struct net_device *dev = tp->dev;
2110 int rc = -EINVAL;
2111
2112 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2113 netif_err(tp, ifup, dev, "invalid firwmare\n");
2114 goto out;
2115 }
2116
2117 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2118 rc = 0;
2119 out:
2120 return rc;
2121 }
2122
/*
 * Execute a (previously validated) firmware PHY action stream.
 * Interpreter state: @predata holds the last value read (modified by
 * OR/AND opcodes, written back by PHY_WRITE_PREVIOUS), @count tallies
 * PHY_READs since the last PHY_CLEAR_READCOUNT, and @index is the
 * program counter.  A zero action word terminates the program early.
 * Opcodes rejected by rtl_fw_data_ok() can never reach the BUG().
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Relative backward jump of regno opcodes. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next opcode when count matches data. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			BUG();
		}
	}
}
2201
2202 static void rtl_release_firmware(struct rtl8169_private *tp)
2203 {
2204 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2205 release_firmware(tp->rtl_fw->fw);
2206 kfree(tp->rtl_fw);
2207 }
2208 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2209 }
2210
/* Run the loaded firmware's PHY action stream, if one is available. */
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		rtl_phy_write_fw(tp, rtl_fw);
}
2219
2220 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2221 {
2222 if (rtl_readphy(tp, reg) != val)
2223 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2224 else
2225 rtl_apply_firmware(tp);
2226 }
2227
/*
 * Vendor PHY init sequence for the 8169s.  The register/value pairs are
 * opaque Realtek magic (0x1f selects the PHY page); keep the order and
 * values exactly as provided by the vendor.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2294
/* PHY setup for the RTL8169sb: a single vendor-supplied register write. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2305
2306 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2307 {
2308 struct pci_dev *pdev = tp->pci_dev;
2309
2310 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2311 (pdev->subsystem_device != 0xe000))
2312 return;
2313
2314 rtl_writephy(tp, 0x1f, 0x0001);
2315 rtl_writephy(tp, 0x10, 0xf01b);
2316 rtl_writephy(tp, 0x1f, 0x0000);
2317 }
2318
/*
 * PHY setup for the RTL8169scd.  Vendor-supplied (register, value) pairs
 * written in order; after the batch, a board-specific quirk is applied.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Additional fixup for one specific Gigabyte board. */
	rtl8169scd_hw_phy_config_quirk(tp);
}
2365
/*
 * PHY setup for the RTL8169sce.  Vendor-supplied (register, value) pairs
 * written in order via rtl_writephy_batch(); ordering matters.
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2418
/*
 * PHY setup for the RTL8168bb: set bit 0 of register 0x16 (read-modify-write
 * via rtl_patchphy), then write the remaining vendor values.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2431
/* PHY setup for the RTL8168be/bf: a single vendor-supplied register write. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2442
/* PHY setup for the RTL8168cp (revision 1): vendor-supplied writes. */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2455
/*
 * PHY setup for the RTL8168cp (revision 2): first set bit 5 in registers
 * 0x14 and 0x0d (read-modify-write), then apply the vendor table.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2470
/*
 * PHY setup for the RTL8168c (revision 1): vendor table first, then two
 * bit-set patches (bit 5 in 0x14 and 0x0d).
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2499
/*
 * PHY setup for the RTL8168c (revision 2): vendor table, then bit-set
 * patches on registers 0x16, 0x14 and 0x0d.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2527
/*
 * PHY setup for the RTL8168c (revision 3): smaller vendor table, same
 * trailing bit-set patches as revision 2.
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2549
/* Revision 4 uses exactly the same PHY setup as revision 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2554
/*
 * PHY setup for the RTL8168d (revision 1).  A base vendor table is applied,
 * then a workaround sequence chosen by reading efuse location 0x01, then
 * PLL tuning, and finally a conditional firmware load keyed on the value
 * of the MII expansion register.  The write order is hardware-mandated.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	/* The efuse byte selects between two vendor workaround variants. */
	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/*
		 * Walk register 0x0d's low byte up through 0x65..0x6c if it
		 * is not already at the final value 0x6c.
		 */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if the expansion register reads 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2666
/*
 * PHY setup for the RTL8168d (revision 2).  Same structure as revision 1:
 * base vendor table, then an efuse-selected workaround variant, PLL and
 * regulator tuning, and a conditional firmware load (here keyed on the
 * expansion register reading 0xb300).  Write order is hardware-mandated.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* The efuse byte selects between two vendor workaround variants. */
	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Walk 0x0d's low byte up through 0x65..0x6c if needed. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2769
/*
 * PHY setup for the RTL8168d (revision 3): a single vendor-supplied table,
 * written in order.  No efuse-dependent branching for this revision.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2830
/*
 * PHY setup for the RTL8168d (revision 4): short vendor table, then set
 * bit 5 of register 0x0d.
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
2846
/*
 * PHY setup for the RTL8168e (revision 1).  Loads firmware first, then
 * applies the vendor table and a series of documented tuning steps via
 * set/clear masks (rtl_w1w0_phy).  Write order is hardware-mandated.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
2919
/*
 * PHY setup for the RTL8168e (revision 2).  Loads firmware, applies the
 * vendor table, then performs tuning for 4-corner performance, auto speed
 * down, EEE and the chip's "green" feature.  The EEE step also touches an
 * ERI register through the MMIO window.  Write order is hardware-mandated.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
		     ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3004
/*
 * PHY setup for the RTL8168f (revision 1).  Loads firmware, applies the
 * vendor table (channel estimation, green tables, RTCT disable), then the
 * shared tuning steps.  Write order is hardware-mandated.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3071
/*
 * PHY setup for the RTL8168f (revision 2): firmware load plus the tuning
 * steps shared with revision 1, minus its vendor table.
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3095
/*
 * PHY setup for the RTL8102e: three single-bit patches followed by the
 * vendor table.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3112
/*
 * PHY setup for the RTL8105e.  ALDPS is disabled first (with a 100 ms
 * settle delay) before the firmware ("ram code") is loaded, then the
 * vendor table is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3138
3139 static void rtl_hw_phy_config(struct net_device *dev)
3140 {
3141 struct rtl8169_private *tp = netdev_priv(dev);
3142
3143 rtl8169_print_mac_version(tp);
3144
3145 switch (tp->mac_version) {
3146 case RTL_GIGA_MAC_VER_01:
3147 break;
3148 case RTL_GIGA_MAC_VER_02:
3149 case RTL_GIGA_MAC_VER_03:
3150 rtl8169s_hw_phy_config(tp);
3151 break;
3152 case RTL_GIGA_MAC_VER_04:
3153 rtl8169sb_hw_phy_config(tp);
3154 break;
3155 case RTL_GIGA_MAC_VER_05:
3156 rtl8169scd_hw_phy_config(tp);
3157 break;
3158 case RTL_GIGA_MAC_VER_06:
3159 rtl8169sce_hw_phy_config(tp);
3160 break;
3161 case RTL_GIGA_MAC_VER_07:
3162 case RTL_GIGA_MAC_VER_08:
3163 case RTL_GIGA_MAC_VER_09:
3164 rtl8102e_hw_phy_config(tp);
3165 break;
3166 case RTL_GIGA_MAC_VER_11:
3167 rtl8168bb_hw_phy_config(tp);
3168 break;
3169 case RTL_GIGA_MAC_VER_12:
3170 rtl8168bef_hw_phy_config(tp);
3171 break;
3172 case RTL_GIGA_MAC_VER_17:
3173 rtl8168bef_hw_phy_config(tp);
3174 break;
3175 case RTL_GIGA_MAC_VER_18:
3176 rtl8168cp_1_hw_phy_config(tp);
3177 break;
3178 case RTL_GIGA_MAC_VER_19:
3179 rtl8168c_1_hw_phy_config(tp);
3180 break;
3181 case RTL_GIGA_MAC_VER_20:
3182 rtl8168c_2_hw_phy_config(tp);
3183 break;
3184 case RTL_GIGA_MAC_VER_21:
3185 rtl8168c_3_hw_phy_config(tp);
3186 break;
3187 case RTL_GIGA_MAC_VER_22:
3188 rtl8168c_4_hw_phy_config(tp);
3189 break;
3190 case RTL_GIGA_MAC_VER_23:
3191 case RTL_GIGA_MAC_VER_24:
3192 rtl8168cp_2_hw_phy_config(tp);
3193 break;
3194 case RTL_GIGA_MAC_VER_25:
3195 rtl8168d_1_hw_phy_config(tp);
3196 break;
3197 case RTL_GIGA_MAC_VER_26:
3198 rtl8168d_2_hw_phy_config(tp);
3199 break;
3200 case RTL_GIGA_MAC_VER_27:
3201 rtl8168d_3_hw_phy_config(tp);
3202 break;
3203 case RTL_GIGA_MAC_VER_28:
3204 rtl8168d_4_hw_phy_config(tp);
3205 break;
3206 case RTL_GIGA_MAC_VER_29:
3207 case RTL_GIGA_MAC_VER_30:
3208 rtl8105e_hw_phy_config(tp);
3209 break;
3210 case RTL_GIGA_MAC_VER_31:
3211 /* None. */
3212 break;
3213 case RTL_GIGA_MAC_VER_32:
3214 case RTL_GIGA_MAC_VER_33:
3215 rtl8168e_1_hw_phy_config(tp);
3216 break;
3217 case RTL_GIGA_MAC_VER_34:
3218 rtl8168e_2_hw_phy_config(tp);
3219 break;
3220 case RTL_GIGA_MAC_VER_35:
3221 rtl8168f_1_hw_phy_config(tp);
3222 break;
3223 case RTL_GIGA_MAC_VER_36:
3224 rtl8168f_2_hw_phy_config(tp);
3225 break;
3226
3227 default:
3228 break;
3229 }
3230 }
3231
/*
 * Periodic PHY monitor, run from task context.  While a PHY reset is still
 * pending, re-arm the timer with a short (HZ/10) delay instead of busy
 * waiting.  Once the reset has completed: if the link is up, stop; if not,
 * trigger another PHY reset and re-arm with the normal timeout.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		return;

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3259
3260 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3261 {
3262 if (!test_and_set_bit(flag, tp->wk.flags))
3263 schedule_work(&tp->wk.work);
3264 }
3265
3266 static void rtl8169_phy_timer(unsigned long __opaque)
3267 {
3268 struct net_device *dev = (struct net_device *)__opaque;
3269 struct rtl8169_private *tp = netdev_priv(dev);
3270
3271 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3272 }
3273
/*
 * Tear down everything acquired during probe, in reverse acquisition
 * order: MMIO mapping, PCI regions, MWI, the PCI device, and finally the
 * net_device itself.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3283
3284 static void rtl8169_phy_reset(struct net_device *dev,
3285 struct rtl8169_private *tp)
3286 {
3287 unsigned int i;
3288
3289 tp->phy_reset_enable(tp);
3290 for (i = 0; i < 100; i++) {
3291 if (!tp->phy_reset_pending(tp))
3292 return;
3293 msleep(1);
3294 }
3295 netif_err(tp, link, dev, "PHY reset failed\n");
3296 }
3297
3298 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3299 {
3300 void __iomem *ioaddr = tp->mmio_addr;
3301
3302 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3303 (RTL_R8(PHYstatus) & TBI_Enable);
3304 }
3305
/*
 * Full PHY bring-up: chipset-specific configuration, early-chip PCI/MAC
 * fixups, a PHY reset, and finally autonegotiation advertising every speed
 * the PHY supports (gigabit only when the MII layer reports GMII support).
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3341
/*
 * Program the hardware unicast filter (MAC address registers) from a
 * 6-byte address.  The config registers are unlocked around the writes,
 * each 32-bit write is read back (presumably to flush posted PCI writes --
 * TODO confirm), and VER_34 additionally mirrors the address into four
 * EXGMAC registers.  Runs under the driver's work lock.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	/* Pack the 6 address bytes little-endian into two registers. */
	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		const struct exgmac_reg e[] = {
			{ .addr = 0xe0, ERIAR_MASK_1111, .val = low },
			{ .addr = 0xe4, ERIAR_MASK_1111, .val = high },
			{ .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
			{ .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
							   low  >> 16 },
		};

		rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3377
3378 static int rtl_set_mac_address(struct net_device *dev, void *p)
3379 {
3380 struct rtl8169_private *tp = netdev_priv(dev);
3381 struct sockaddr *addr = p;
3382
3383 if (!is_valid_ether_addr(addr->sa_data))
3384 return -EADDRNOTAVAIL;
3385
3386 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3387
3388 rtl_rar_set(tp, dev->dev_addr);
3389
3390 return 0;
3391 }
3392
3393 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3394 {
3395 struct rtl8169_private *tp = netdev_priv(dev);
3396 struct mii_ioctl_data *data = if_mii(ifr);
3397
3398 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3399 }
3400
3401 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3402 struct mii_ioctl_data *data, int cmd)
3403 {
3404 switch (cmd) {
3405 case SIOCGMIIPHY:
3406 data->phy_id = 32; /* Internal PHY */
3407 return 0;
3408
3409 case SIOCGMIIREG:
3410 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3411 return 0;
3412
3413 case SIOCSMIIREG:
3414 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3415 return 0;
3416 }
3417 return -EOPNOTSUPP;
3418 }
3419
/* TBI-mode chips expose no MII registers: all MII ioctls are rejected. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3424
3425 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3426 {
3427 if (tp->features & RTL_FEATURE_MSI) {
3428 pci_disable_msi(pdev);
3429 tp->features &= ~RTL_FEATURE_MSI;
3430 }
3431 }
3432
/*
 * Select the MDIO accessor pair for this chip: the 8168dp revisions need
 * special read/write routines; everything else uses the generic ones.
 */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write = r8168dp_1_mdio_write;
		ops->read = r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write = r8168dp_2_mdio_write;
		ops->read = r8168dp_2_mdio_read;
		break;
	default:
		ops->write = r8169_mdio_write;
		ops->read = r8169_mdio_read;
		break;
	}
}
3453
/*
 * WoL suspend quirk: on the listed chip versions, enable reception of
 * broadcast, multicast and directed frames so wake packets are seen while
 * suspended.  Other versions need no change.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3471
/*
 * Prepare for a WoL-capable power-down.  Returns false (doing nothing)
 * when no wake source is enabled; otherwise clears BMCR on page 0,
 * applies the suspend quirk, and returns true so the caller skips the
 * full PLL power-down.
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, 0x0000);

	rtl_wol_suspend_quirk(tp);

	return true;
}
3484
/* Power the 810x PHY down: select page 0, set the BMCR power-down bit. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3490
/* Power the 810x PHY up: select page 0, re-enable autonegotiation. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3496
/*
 * PLL power-down for 810x chips.  If WoL is armed, the WoL path handles
 * everything and we return early.  Otherwise power down the PHY, and on
 * chip versions other than the listed early ones also clear the PMCH
 * power bit (0x80).
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3519
3520 static void r810x_pll_power_up(struct rtl8169_private *tp)
3521 {
3522 void __iomem *ioaddr = tp->mmio_addr;
3523
3524 r810x_phy_power_up(tp);
3525
3526 switch (tp->mac_version) {
3527 case RTL_GIGA_MAC_VER_07:
3528 case RTL_GIGA_MAC_VER_08:
3529 case RTL_GIGA_MAC_VER_09:
3530 case RTL_GIGA_MAC_VER_10:
3531 case RTL_GIGA_MAC_VER_13:
3532 case RTL_GIGA_MAC_VER_16:
3533 break;
3534 default:
3535 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3536 break;
3537 }
3538 }
3539
/*
 * Power the 8168 PHY up.  Some versions need an extra write to vendor
 * register 0x0e before autonegotiation is re-enabled via BMCR.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}

/*
 * Power the 8168 PHY down.  VER_32/33 keep autoneg enabled alongside
 * power-down; older versions write vendor register 0x0e first and then
 * fall through to the plain BMCR power-down.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - BMCR power-down applies to these too */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3597
/*
 * 8168 PLL power down.  Skipped entirely when the DASH management
 * firmware is active (8168DP family) or when ASF is enabled on
 * VER_23/24, since both need the chip to stay powered.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	/* Magic EPHY value required before power down on VER_32/33. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(ioaddr, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		/* Gate the PLL via PMCH bit 7 where supported. */
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}

/* Reverse of r8168_pll_power_down: ungate the PLL, then power the PHY up. */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
3655
/* Invoke an optional per-chip hook; a NULL hook means "nothing to do". */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;
	op(tp);
}
3662
/* Dispatch to the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}

/* Dispatch to the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
3672
3673 static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3674 {
3675 struct pll_power_ops *ops = &tp->pll_power_ops;
3676
3677 switch (tp->mac_version) {
3678 case RTL_GIGA_MAC_VER_07:
3679 case RTL_GIGA_MAC_VER_08:
3680 case RTL_GIGA_MAC_VER_09:
3681 case RTL_GIGA_MAC_VER_10:
3682 case RTL_GIGA_MAC_VER_16:
3683 case RTL_GIGA_MAC_VER_29:
3684 case RTL_GIGA_MAC_VER_30:
3685 ops->down = r810x_pll_power_down;
3686 ops->up = r810x_pll_power_up;
3687 break;
3688
3689 case RTL_GIGA_MAC_VER_11:
3690 case RTL_GIGA_MAC_VER_12:
3691 case RTL_GIGA_MAC_VER_17:
3692 case RTL_GIGA_MAC_VER_18:
3693 case RTL_GIGA_MAC_VER_19:
3694 case RTL_GIGA_MAC_VER_20:
3695 case RTL_GIGA_MAC_VER_21:
3696 case RTL_GIGA_MAC_VER_22:
3697 case RTL_GIGA_MAC_VER_23:
3698 case RTL_GIGA_MAC_VER_24:
3699 case RTL_GIGA_MAC_VER_25:
3700 case RTL_GIGA_MAC_VER_26:
3701 case RTL_GIGA_MAC_VER_27:
3702 case RTL_GIGA_MAC_VER_28:
3703 case RTL_GIGA_MAC_VER_31:
3704 case RTL_GIGA_MAC_VER_32:
3705 case RTL_GIGA_MAC_VER_33:
3706 case RTL_GIGA_MAC_VER_34:
3707 case RTL_GIGA_MAC_VER_35:
3708 case RTL_GIGA_MAC_VER_36:
3709 ops->down = r8168_pll_power_down;
3710 ops->up = r8168_pll_power_up;
3711 break;
3712
3713 default:
3714 ops->down = NULL;
3715 ops->up = NULL;
3716 break;
3717 }
3718 }
3719
/*
 * Program the base RxConfig value (FIFO threshold / DMA burst /
 * interrupt coalescing bits) appropriate for the chip generation.
 * The accept-filter bits are managed separately by rtl_set_rx_mode().
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
3755
3756 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3757 {
3758 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
3759 }
3760
/*
 * Run the chip-specific jumbo-frame enable hook with the config
 * registers unlocked (Cfg9346), relocking afterwards.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}

/* Counterpart of rtl_hw_jumbo_enable for the disable hook. */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
3778
/*
 * 8168C jumbo enable: set both jumbo bits and lower the PCIe max read
 * request size (0x2 encoding) to keep DMA working with large frames.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}

/* 8168C jumbo disable: clear both jumbo bits, restore read request size. */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
3796
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}

/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
3810
/*
 * 8168E jumbo enable: raise the TX packet size limit, set the jumbo
 * bits, and lower the PCIe max read request size.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}

/* 8168E jumbo disable: restore normal TX size limit and clear jumbo bits. */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
3830
/* 8168B rev 0 jumbo enable: PCIe read request tweak only, no MAC bits. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}

/* 8168B rev 0 jumbo disable: restore the default read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
3842
/* 8168B rev 1 jumbo enable: rev 0 sequence plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}

/* 8168B rev 1 jumbo disable: rev 0 sequence plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
3860
3861 static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
3862 {
3863 struct jumbo_ops *ops = &tp->jumbo_ops;
3864
3865 switch (tp->mac_version) {
3866 case RTL_GIGA_MAC_VER_11:
3867 ops->disable = r8168b_0_hw_jumbo_disable;
3868 ops->enable = r8168b_0_hw_jumbo_enable;
3869 break;
3870 case RTL_GIGA_MAC_VER_12:
3871 case RTL_GIGA_MAC_VER_17:
3872 ops->disable = r8168b_1_hw_jumbo_disable;
3873 ops->enable = r8168b_1_hw_jumbo_enable;
3874 break;
3875 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
3876 case RTL_GIGA_MAC_VER_19:
3877 case RTL_GIGA_MAC_VER_20:
3878 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
3879 case RTL_GIGA_MAC_VER_22:
3880 case RTL_GIGA_MAC_VER_23:
3881 case RTL_GIGA_MAC_VER_24:
3882 case RTL_GIGA_MAC_VER_25:
3883 case RTL_GIGA_MAC_VER_26:
3884 ops->disable = r8168c_hw_jumbo_disable;
3885 ops->enable = r8168c_hw_jumbo_enable;
3886 break;
3887 case RTL_GIGA_MAC_VER_27:
3888 case RTL_GIGA_MAC_VER_28:
3889 ops->disable = r8168dp_hw_jumbo_disable;
3890 ops->enable = r8168dp_hw_jumbo_enable;
3891 break;
3892 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
3893 case RTL_GIGA_MAC_VER_32:
3894 case RTL_GIGA_MAC_VER_33:
3895 case RTL_GIGA_MAC_VER_34:
3896 ops->disable = r8168e_hw_jumbo_disable;
3897 ops->enable = r8168e_hw_jumbo_enable;
3898 break;
3899
3900 /*
3901 * No action needed for jumbo frames with 8169.
3902 * No jumbo for 810x at all.
3903 */
3904 default:
3905 ops->disable = NULL;
3906 ops->enable = NULL;
3907 break;
3908 }
3909 }
3910
/*
 * Issue a chip soft reset and poll (up to 100 x 100us = 10ms) for the
 * hardware to clear the CmdReset bit.  A timeout is silently ignored.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 0; i < 100; i++) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		udelay(100);
	}
}
3926
/*
 * Load and validate the PHY firmware patch for this chip, caching it
 * in tp->rtl_fw.  On any failure (or when the chip needs no firmware)
 * tp->rtl_fw is set to NULL and the driver continues without a patch;
 * firmware is optional.  Uses goto-based cleanup to unwind partially
 * acquired resources.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}

/*
 * Fetch firmware only on the first attempt; tp->rtl_fw starts as an
 * ERR_PTR sentinel and becomes either a cached blob or NULL afterwards.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
3970
/* Stop packet reception by clearing all accept-filter bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
3977
/*
 * Quiesce and reset the chip: mask interrupts, close RX, wait for the
 * TX path to drain (the wait mechanism differs per chip family), then
 * perform the soft reset.  The statement order here is mandated by the
 * hardware - do not reorder.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* 8168DP: wait for the pending normal-priority queue bit. */
		while (RTL_R8(TxPoll) & NPQ)
			udelay(20);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		/* Newer chips report an explicit TX-FIFO-empty flag. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
			udelay(100);
	} else {
		/* Older chips: fixed delay after the stop request. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4005
/* Program TX DMA burst size and interframe gap into TxConfig. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}

/* Run the chip-specific hw_start hook, then unmask all interrupts. */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4023
/*
 * Tell the chip where the TX/RX descriptor rings live in DMA space.
 * The high/low write order below is load-bearing - see the comment.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}

/*
 * Read CPlusCmd and write the same value back (a hardware latching
 * quirk), returning the value read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4046
/* Set the RX size filter just above the buffer size (filtering hurts). */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}

/*
 * Write an undocumented magic value to register 0x7c for 8110SCd/SCe,
 * chosen by chip version and the sampled PCI bus clock speed.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
	static const struct rtl_cfg2_info {
		u32 mac_version;
		u32 clk;
		u32 val;
	} cfg2_info [] = {
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
	};
	const struct rtl_cfg2_info *p = cfg2_info;
	unsigned int i;
	u32 clk;

	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
		if ((p->mac_version == mac_version) && (p->clk == clk)) {
			RTL_W32(0x7c, p->val);
			break;
		}
	}
}
4077
/*
 * Program the RX accept filters and the 64-bit multicast hash from the
 * netdevice's flags and multicast list.  Promiscuous/allmulti modes
 * open the hash completely; otherwise each address contributes one bit
 * selected by the top 6 bits of its CRC32.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		/* Newer chips expect the hash words swapped and byte-reversed. */
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4127
/*
 * hw_start for the original 8169 family: apply per-version PCI and
 * C+ command quirks, program RX config / max size / descriptor rings,
 * then enable TX/RX and the receive filter.  Ordering of register
 * writes follows vendor requirements; do not reorder.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* Versions 01-04 must have TX/RX enabled before further setup. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* The other versions enable TX/RX only after the rings are set. */
	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4199
4200 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4201 {
4202 if (tp->csi_ops.write)
4203 tp->csi_ops.write(tp->mmio_addr, addr, value);
4204 }
4205
4206 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4207 {
4208 if (tp->csi_ops.read)
4209 return tp->csi_ops.read(tp->mmio_addr, addr);
4210 else
4211 return ~0;
4212 }
4213
/*
 * Read-modify-write CSI register 0x070c, preserving the low 24 bits
 * and replacing the top byte with the given access-enable bits.
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | bits);
}

/* CSI access enable, variant 1 (top byte 0x17). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}

/* CSI access enable, variant 2 (top byte 0x27). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4231
/*
 * Raw CSI write: latch the data, issue the write command, then poll
 * (up to 100 x 10us) for the hardware to clear the busy flag.
 * A timeout is silently ignored.
 */
static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
{
	unsigned int i;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	for (i = 0; i < 100; i++) {
		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
			break;
		udelay(10);
	}
}

/*
 * Raw CSI read: issue the read command and poll for completion.
 * Returns all-ones when the hardware never signals completion.
 */
static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
{
	u32 value = ~0x00;
	unsigned int i;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	for (i = 0; i < 100; i++) {
		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
			value = RTL_R32(CSIDR);
			break;
		}
		udelay(10);
	}

	return value;
}
4265
4266 static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4267 {
4268 struct csi_ops *ops = &tp->csi_ops;
4269
4270 switch (tp->mac_version) {
4271 case RTL_GIGA_MAC_VER_01:
4272 case RTL_GIGA_MAC_VER_02:
4273 case RTL_GIGA_MAC_VER_03:
4274 case RTL_GIGA_MAC_VER_04:
4275 case RTL_GIGA_MAC_VER_05:
4276 case RTL_GIGA_MAC_VER_06:
4277 case RTL_GIGA_MAC_VER_10:
4278 case RTL_GIGA_MAC_VER_11:
4279 case RTL_GIGA_MAC_VER_12:
4280 case RTL_GIGA_MAC_VER_13:
4281 case RTL_GIGA_MAC_VER_14:
4282 case RTL_GIGA_MAC_VER_15:
4283 case RTL_GIGA_MAC_VER_16:
4284 case RTL_GIGA_MAC_VER_17:
4285 ops->write = NULL;
4286 ops->read = NULL;
4287 break;
4288
4289 default:
4290 ops->write = r8169_csi_write;
4291 ops->read = r8169_csi_read;
4292 break;
4293 }
4294 }
4295
/*
 * One EPHY register fixup, as consumed by rtl_ephy_init(): read the
 * register at @offset, clear the @mask bits, set the @bits bits.
 */
struct ephy_info {
	unsigned int offset;	/* EPHY register offset */
	u16 mask;		/* bits to clear (rtl_ephy_init semantics) */
	u16 bits;		/* bits to set */
};
4301
4302 static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
4303 {
4304 u16 w;
4305
4306 while (len-- > 0) {
4307 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
4308 rtl_ephy_write(ioaddr, e->offset, w);
4309 e++;
4310 }
4311 }
4312
/* Clear the PCIe CLKREQ# enable bit in the Link Control register. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
		ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
	}
}

/* Set the PCIe CLKREQ# enable bit in the Link Control register. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
		ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
	}
}
4338
/*
 * CPlusCmd bits cleared by all 8168 hw_start paths: BIST, MAC debug
 * output, forced duplex/flow-control overrides, ASF and the packet
 * counter disable bit.
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4349
/* hw_start for 8168B rev B: disable beacon, clear quirk bits, PCIe tweak. */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}

/* hw_start for 8168B rev E/F: rev B sequence plus TX size / Config4 setup. */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4373
/* Common tail of the 8168C/CP hw_start sequences (after EPHY setup). */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4389
/* hw_start for 8168CP rev 1: CSI enable, EPHY fixup table, common tail. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}

/* hw_start for 8168CP rev 2: no EPHY table, no clock-request change. */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}

/* hw_start for 8168CP rev 3: rev 2 plus undocumented DBG_REG magic. */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4440
/* hw_start for 8168C rev 1: NAK workaround bits plus its EPHY table. */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}

/* hw_start for 8168C rev 2: smaller EPHY table, same common tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}

/* hw_start for 8168C rev 3: identical to rev 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}

/* hw_start for 8168C rev 4: no EPHY fixups needed. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4485
/* hw_start for 8168D: CSI enable, clock request off, quirk-mask clear. */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}

/* hw_start for 8168DP: variant-1 CSI enable, no CPlusCmd quirk clear. */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4515
4516 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4517 {
4518 void __iomem *ioaddr = tp->mmio_addr;
4519 struct pci_dev *pdev = tp->pci_dev;
4520 static const struct ephy_info e_info_8168d_4[] = {
4521 { 0x0b, ~0, 0x48 },
4522 { 0x19, 0x20, 0x50 },
4523 { 0x0c, ~0, 0x20 }
4524 };
4525 int i;
4526
4527 rtl_csi_access_enable_1(tp);
4528
4529 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4530
4531 RTL_W8(MaxTxPacketSize, TxPacketMax);
4532
4533 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4534 const struct ephy_info *e = e_info_8168d_4 + i;
4535 u16 w;
4536
4537 w = rtl_ephy_read(ioaddr, e->offset);
4538 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
4539 }
4540
4541 rtl_enable_clock_request(pdev);
4542 }
4543
/*
 * hw_start for 8168E rev 1: large EPHY fixup table, clock request off,
 * TX FIFO pointer reset (TXPLA_RST pulse) and SPI disable.
 */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4580
/*
 * hw_start for 8168E rev 2: EPHY fixups plus a series of extended
 * GMAC (ERI) register writes, early TX size, auto-FIFO TX config,
 * EEE LED frequency tweak and power-management bits.
 */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
		     ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4620
/*
 * hw_start for 8168F rev 1: similar to 8168E rev 2 with a larger EPHY
 * table and additional ERI toggles (0xdc pulse, 0x1b0/0x1d0 bits).
 */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
		     ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4665
/*
 * Common bring-up path for all 8168 variants: programs the shared
 * registers (max Tx packet size, Rx buffer size, CPlusCmd, interrupt
 * mitigation, descriptor ring addresses, Rx filter, TxConfig) and then
 * dispatches to the per-chip setup routine selected by tp->mac_version.
 * Runs between Cfg9346 unlock/lock so config registers are writable.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* Fold the bits already set in hardware into the cached value
	 * before adding the new ones. */
	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* Dummy read — presumably flushes posted MMIO writes before the
	 * per-chip setup runs (TODO confirm against other callers). */
	RTL_R8(IntrMask);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	default:
		/* Probe should have rejected unknown versions; log and
		 * continue with the common setup only. */
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
		       dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4775
/* CPlusCmd debug/diagnostic bits cleared on the 810x family before the
 * register is programmed in rtl_hw_start_8101(). */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4786
/*
 * Chip-specific bring-up for the first 8102e revision
 * (RTL_GIGA_MAC_VER_07): NAK fix, PCIe tweak, Config1/Config3
 * programming, LED bit adjustment and EPHY parameter table.
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01, 0, 0x6e65 },
		{ 0x02, 0, 0x091f },
		{ 0x03, 0, 0xc2f9 },
		{ 0x06, 0, 0xafb5 },
		{ 0x07, 0, 0x0e00 },
		{ 0x19, 0, 0xec80 },
		{ 0x01, 0, 0x2e65 },
		{ 0x01, 0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck set, drop LEDS0 — vendor quirk, exact
	 * hardware rationale not documented here. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
4819
/*
 * Chip-specific bring-up for the second 8102e revision
 * (RTL_GIGA_MAC_VER_09); also used as the base of the _3 variant.
 */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
4832
/*
 * Third 8102e revision (RTL_GIGA_MAC_VER_08): _2 setup plus one extra
 * EPHY register write.
 */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
}
4839
/*
 * Chip-specific bring-up for the 8105e (RTL_GIGA_MAC_VER_29):
 * ASPM/tally-counter event tweaks, OOB MCU bits, power management
 * bit and the EPHY parameter table.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07, 0, 0x4000 },
		{ 0x19, 0, 0x0200 },
		{ 0x19, 0, 0x0020 },
		{ 0x1e, 0, 0x2000 },
		{ 0x03, 0, 0x0001 },
		{ 0x19, 0, 0x0100 },
		{ 0x19, 0, 0x0004 },
		{ 0x0a, 0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
4865
/*
 * Second 8105e variant (RTL_GIGA_MAC_VER_30): _1 setup plus one extra
 * EPHY bit set in register 0x1e.
 */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
}
4873
/*
 * Common bring-up path for the 8101/810x family: event-mask quirks,
 * optional PCIe no-snoop enable, per-chip setup dispatch, then the
 * shared register programming (sizes, CPlusCmd, mitigation, rings,
 * Rx filter).  Runs between Cfg9346 unlock/lock.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* 8105e and later: RxFIFOOver is not treated as a slow event. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap) {
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* Clear the debug/diagnostic CPlusCmd bits on this family. */
	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Dummy read — presumably flushes posted writes; TODO confirm. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
4938
4939 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
4940 {
4941 struct rtl8169_private *tp = netdev_priv(dev);
4942
4943 if (new_mtu < ETH_ZLEN ||
4944 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
4945 return -EINVAL;
4946
4947 if (new_mtu > ETH_DATA_LEN)
4948 rtl_hw_jumbo_enable(tp);
4949 else
4950 rtl_hw_jumbo_disable(tp);
4951
4952 dev->mtu = new_mtu;
4953 netdev_update_features(dev);
4954
4955 return 0;
4956 }
4957
/*
 * Park an Rx descriptor: poison the DMA address and clear DescOwn (and
 * the reserved bits) so the chip will not use this slot until it is
 * re-armed with a fresh buffer via rtl8169_mark_to_asic().
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
4963
/*
 * Release one Rx buffer: unmap its streaming DMA mapping, free the
 * kmalloc'ed storage, clear the caller's pointer and park the
 * descriptor so the chip cannot reuse it.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
4974
/*
 * Hand an Rx descriptor (back) to the chip: set DescOwn and the buffer
 * size while preserving the RingEnd marker of the last descriptor.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
4981
/*
 * Publish a freshly mapped Rx buffer to the chip.  The wmb() orders
 * the address store before the DescOwn store so the device never sees
 * an owned descriptor with a stale address.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
4989
/* Round a buffer address up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	long addr = (long)data;

	return (void *)((addr + 15) & ~15L);
}
4994
/*
 * Allocate and DMA-map one Rx buffer on the device's NUMA node and arm
 * the given descriptor with it.  Returns the raw buffer pointer (the
 * declared sk_buff return type notwithstanding — callers store it as
 * void *), or NULL on allocation/mapping failure.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* The chip needs 16-byte aligned buffers; retry with slack if the
	 * first allocation happens to be misaligned. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5030
5031 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5032 {
5033 unsigned int i;
5034
5035 for (i = 0; i < NUM_RX_DESC; i++) {
5036 if (tp->Rx_databuff[i]) {
5037 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5038 tp->RxDescArray + i);
5039 }
5040 }
5041 }
5042
/* Set RingEnd so the chip wraps back to the first descriptor here. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5047
5048 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5049 {
5050 unsigned int i;
5051
5052 for (i = 0; i < NUM_RX_DESC; i++) {
5053 void *data;
5054
5055 if (tp->Rx_databuff[i])
5056 continue;
5057
5058 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5059 if (!data) {
5060 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5061 goto err_out;
5062 }
5063 tp->Rx_databuff[i] = data;
5064 }
5065
5066 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5067 return 0;
5068
5069 err_out:
5070 rtl8169_rx_clear(tp);
5071 return -ENOMEM;
5072 }
5073
5074 static int rtl8169_init_ring(struct net_device *dev)
5075 {
5076 struct rtl8169_private *tp = netdev_priv(dev);
5077
5078 rtl8169_init_ring_indexes(tp);
5079
5080 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5081 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5082
5083 return rtl8169_rx_fill(tp);
5084 }
5085
/*
 * Tear down one Tx slot: unmap its DMA buffer and scrub the descriptor
 * and bookkeeping so the slot reads as free.  The skb itself (if any)
 * is the caller's responsibility.
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5098
5099 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5100 unsigned int n)
5101 {
5102 unsigned int i;
5103
5104 for (i = 0; i < n; i++) {
5105 unsigned int entry = (start + i) % NUM_TX_DESC;
5106 struct ring_info *tx_skb = tp->tx_skb + entry;
5107 unsigned int len = tx_skb->len;
5108
5109 if (len) {
5110 struct sk_buff *skb = tx_skb->skb;
5111
5112 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5113 tp->TxDescArray + entry);
5114 if (skb) {
5115 tp->dev->stats.tx_dropped++;
5116 dev_kfree_skb(skb);
5117 tx_skb->skb = NULL;
5118 }
5119 }
5120 }
5121 }
5122
/* Flush the entire Tx ring and reset its indexes and the BQL state. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
	netdev_reset_queue(tp->dev);
}
5129
/*
 * Full restart of the NIC, run from the workqueue: quiesce NAPI and the
 * Tx queue, reset the chip, re-arm every Rx descriptor, flush the Tx
 * ring, then bring everything back up.  The synchronize_sched() lets a
 * racing hard_start_xmit finish before the rings are touched.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	synchronize_sched();

	rtl8169_hw_reset(tp);

	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5152
/* ndo_tx_timeout handler: defer a full chip reset to the workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5159
/*
 * Map and describe every page fragment of @skb into consecutive Tx
 * descriptors following tp->cur_tx.  opts[0]/opts[1] carry the opts1
 * flags and VLAN/checksum bits prepared by the caller.  Returns the
 * number of fragments consumed, or -EIO after unwinding the mappings
 * already made.  Note: DescOwn is set via opts[0] here — the caller is
 * responsible for ordering the first descriptor's handoff.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			 (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The skb is owned (and later freed) by its last fragment. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind only the fragments mapped so far, not the head slot. */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5210
5211 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5212 struct sk_buff *skb, u32 *opts)
5213 {
5214 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5215 u32 mss = skb_shinfo(skb)->gso_size;
5216 int offset = info->opts_offset;
5217
5218 if (mss) {
5219 opts[0] |= TD_LSO;
5220 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5221 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5222 const struct iphdr *ip = ip_hdr(skb);
5223
5224 if (ip->protocol == IPPROTO_TCP)
5225 opts[offset] |= info->checksum.tcp;
5226 else if (ip->protocol == IPPROTO_UDP)
5227 opts[offset] |= info->checksum.udp;
5228 else
5229 WARN_ON_ONCE(1);
5230 }
5231 }
5232
/*
 * ndo_start_xmit: map the skb head, describe the fragments, publish the
 * first descriptor to the chip and kick TxPoll.  The barrier sequence
 * (wmb before the DescOwn store, wmb before the doorbell, smp_mb around
 * the stop/wake dance) is load-bearing — it pairs with rtl_tx() and with
 * the device's DMA reads.  Do not reorder.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	/* Slot still owned by the chip: ring accounting went wrong. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	/* Order the address/opts2/fragment stores before DescOwn below. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	/* Order the DescOwn store before the TxPoll doorbell. */
	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5332
/*
 * Handle a SYSErr (PCI error) event: log the PCI command/status pair,
 * rewrite them to clear the error bits, optionally disable 64-bit DMA
 * addressing if the error hit at boot, then reset the chip and queue a
 * full restart.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI_STATUS error bits are write-one-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5379
/* Per-call Tx completion tally, used by rtl_tx() to batch the stats
 * and BQL updates after the completion loop. */
struct rtl_txc {
	int packets;
	int bytes;
};
5384
/*
 * Tx completion path (NAPI context): walk descriptors from dirty_tx to
 * cur_tx, reclaim those the chip has released (DescOwn clear), free the
 * skbs on their LastFrag slot, then batch-update stats/BQL and possibly
 * wake the queue.  The rmb/smp_mb placement pairs with
 * rtl8169_start_xmit — do not reorder.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	struct rtl8169_stats *tx_stats = &tp->tx_stats;
	unsigned int dirty_tx, tx_left;
	struct rtl_txc txc = { 0, 0 };

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		/* Read the descriptor only after the ownership check data
		 * written by the device is visible. */
		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			struct sk_buff *skb = tx_skb->skb;

			txc.packets++;
			txc.bytes += skb->len;
			dev_kfree_skb(skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets += txc.packets;
	tx_stats->bytes += txc.bytes;
	u64_stats_update_end(&tx_stats->syncp);

	netdev_completed_queue(dev, txc.packets, txc.bytes);

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5453
5454 static inline int rtl8169_fragmented_frame(u32 status)
5455 {
5456 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5457 }
5458
5459 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5460 {
5461 u32 status = opts1 & RxProtoMask;
5462
5463 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5464 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5465 skb->ip_summed = CHECKSUM_UNNECESSARY;
5466 else
5467 skb_checksum_none_assert(skb);
5468 }
5469
/*
 * Copy a received frame out of the (reused) DMA buffer into a freshly
 * allocated skb.  The dma_sync_single_for_cpu/for_device pair brackets
 * the CPU access so the buffer can be handed straight back to the chip.
 * Returns NULL if skb allocation fails.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5488
/*
 * Rx path (NAPI context): walk the ring from tp->cur_rx, stop at the
 * first descriptor still owned by the chip or when the budget runs out.
 * Error frames update the error counters (and may still be delivered
 * under NETIF_F_RXALL); good frames are copied into a new skb, checksum
 * and VLAN information is attached, and the buffer is re-armed for the
 * chip.  Returns the number of descriptors processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read opts1 only after the device's writes are visible. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* NETIF_F_RXALL: deliver runt/CRC-error frames too. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD plateform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Buffers are recycled in place, so dirty_rx tracks cur_rx. */
	tp->dirty_rx += count;

	return count;
}
5587
/*
 * Hard IRQ handler: check for our events (0xffff means the device is
 * gone/powered off on a shared line), then mask the chip's interrupts
 * and defer all work to NAPI.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);
	if (status && status != 0xffff) {
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
5607
/*
 * Workqueue context.
 */
/*
 * Handle the slow (rare) events deferred from rtl8169_poll: Rx FIFO
 * overflow, PCI system error and link change.  Finishes by bouncing
 * NAPI so that normal Rx/Tx processing resumes with interrupts
 * re-enabled from the poll path.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	/* Restart NAPI: disable while the IRQ mask is cleared, then
	 * reschedule so the poll loop re-enables interrupts itself. */
	napi_disable(&tp->napi);
	rtl_irq_disable(tp);

	napi_enable(&tp->napi);
	napi_schedule(&tp->napi);
}
5643
/*
 * Workqueue entry point: run every pending deferred action in table
 * order, under the work lock and only while the interface is running.
 * rtl_slow_event_work must stay first so a reset requested by it (see
 * the RxFIFOOver hack) is picked up in the same pass.
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
5677
/*
 * NAPI poll: ack and process fast (Rx/Tx) events inline; slow events
 * are left un-acked, masked out of the re-enable mask and deferred to
 * the workqueue.  Interrupts are re-enabled only when the Rx budget
 * was not exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Ack fast events only; slow ones are acked by the workqueue. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
5710
/*
 * Fold the chip's RxMissed counter into the netdev stats and reset it.
 * Only the original 8169 chips (up to MAC_VER_06) expose this counter.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	/* Counter is 24 bits wide. */
	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
5721
/*
 * Stop the interface: kill the timer, quiesce NAPI and the Tx queue,
 * reset the hardware, harvest the last Rx-missed count, then flush the
 * rings and power the PLL down.  Called with the work lock held by
 * rtl8169_close()/suspend paths.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
5749
/*
 * ndo_stop: take a runtime-PM reference for the teardown, snapshot the
 * hardware counters, stop the device, then release the IRQ and the
 * coherent descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from doing any more deferred work. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
5779
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netconsole/netpoll hook: run the IRQ handler with interrupts off. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
5788
/*
 * ndo_open: allocate the coherent descriptor rings, fill the Rx ring,
 * load firmware, request the IRQ and bring the hardware up.  Error
 * paths unwind in reverse acquisition order via the goto ladder.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx desscriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
5868
5869 static struct rtnl_link_stats64 *
5870 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5871 {
5872 struct rtl8169_private *tp = netdev_priv(dev);
5873 void __iomem *ioaddr = tp->mmio_addr;
5874 unsigned int start;
5875
5876 if (netif_running(dev))
5877 rtl8169_rx_missed(dev, ioaddr);
5878
5879 do {
5880 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
5881 stats->rx_packets = tp->rx_stats.packets;
5882 stats->rx_bytes = tp->rx_stats.bytes;
5883 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
5884
5885
5886 do {
5887 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
5888 stats->tx_packets = tp->tx_stats.packets;
5889 stats->tx_bytes = tp->tx_stats.bytes;
5890 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
5891
5892 stats->rx_dropped = dev->stats.rx_dropped;
5893 stats->tx_dropped = dev->stats.tx_dropped;
5894 stats->rx_length_errors = dev->stats.rx_length_errors;
5895 stats->rx_errors = dev->stats.rx_errors;
5896 stats->rx_crc_errors = dev->stats.rx_crc_errors;
5897 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
5898 stats->rx_missed_errors = dev->stats.rx_missed_errors;
5899
5900 return stats;
5901 }
5902
/*
 * Common quiescing path shared by system suspend, runtime suspend and
 * shutdown: detach the netdev, stop the Tx queue, disable NAPI and the
 * deferred work, then power the PLL down.  No-op when the interface is
 * not running.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	/* NAPI and the work flag are changed together under the mutex. */
	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
5920
5921 #ifdef CONFIG_PM
5922
/* System sleep (.suspend/.freeze/.poweroff) callback. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	rtl8169_net_suspend(pci_get_drvdata(pdev));

	return 0;
}
5932
/*
 * Bring a previously suspended, running interface back up: reattach the
 * device, power the PLL up, re-enable NAPI and the deferred work, then
 * let the reset work restart the hardware.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* The actual hardware restart happens in the scheduled reset task. */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5948
/* System resume (.resume/.thaw/.restore) callback. */
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_init_phy(dev, netdev_priv(dev));

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
5962
/*
 * Runtime PM suspend: save the user's Wake-on-LAN configuration, force
 * WAKE_ANY so any wake event can bring the chip back, then quiesce the
 * interface.  Bails out when the interface is closed (no Tx ring).
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	/* Saved settings are restored in rtl8169_runtime_resume(). */
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
5981
/*
 * Runtime PM resume: restore the Wake-on-LAN settings saved by
 * rtl8169_runtime_suspend(), reinitialize the PHY and restart the
 * interface.  No-op when the interface is closed (no Tx ring).
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6002
6003 static int rtl8169_runtime_idle(struct device *device)
6004 {
6005 struct pci_dev *pdev = to_pci_dev(device);
6006 struct net_device *dev = pci_get_drvdata(pdev);
6007 struct rtl8169_private *tp = netdev_priv(dev);
6008
6009 return tp->TxDescArray ? -EBUSY : 0;
6010 }
6011
/*
 * System sleep and runtime PM callbacks.  The suspend/resume pair also
 * serves the hibernation transitions (freeze/thaw/poweroff/restore).
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
6023
/* Wired into rtl8169_pci_driver.driver.pm; NULL when CONFIG_PM is off. */
#define RTL8169_PM_OPS	(&rtl8169_pm_ops)

#else /* !CONFIG_PM */

#define RTL8169_PM_OPS	NULL

#endif /* !CONFIG_PM */
6031
/*
 * Shutdown quirk for the 8168b family (MAC versions 11/12/17): leave the
 * receiver enabled (CmdRxEnb only) and stop bus mastering, otherwise
 * Wake-on-LAN does not work on these chips.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* Read back to flush the posted write (PCI commit). */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6051
/*
 * PCI .shutdown handler: quiesce the NIC, restore the permanent MAC
 * address and, on an actual power-off with WoL enabled, apply the WoL
 * quirks and put the device into D3hot armed for wakeup.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	/* Keep the device powered while its registers are accessed. */
	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6079
6080 static void __devexit rtl_remove_one(struct pci_dev *pdev)
6081 {
6082 struct net_device *dev = pci_get_drvdata(pdev);
6083 struct rtl8169_private *tp = netdev_priv(dev);
6084
6085 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6086 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6087 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6088 rtl8168_driver_stop(tp);
6089 }
6090
6091 cancel_work_sync(&tp->wk.work);
6092
6093 unregister_netdev(dev);
6094
6095 rtl_release_firmware(tp);
6096
6097 if (pci_dev_run_wake(pdev))
6098 pm_runtime_get_noresume(&pdev->dev);
6099
6100 /* restore original MAC address */
6101 rtl_rar_set(tp, dev->perm_addr);
6102
6103 rtl_disable_msi(pdev, tp);
6104 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6105 pci_set_drvdata(pdev, NULL);
6106 }
6107
/* net_device callbacks, installed on the netdev in rtl_init_one(). */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6126
/*
 * Per-family configuration, indexed by the RTL_CFG_* value carried in
 * the PCI id table's driver_data: 8169 (CFG_0), 8168 (CFG_1) and
 * 8101/8102 (CFG_2).
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);	/* chip bring-up routine */
	unsigned int region;	/* PCI BAR containing the register window */
	unsigned int align;	/* Rx buffer alignment */
	u16 event_slow;		/* irq events handled on the slow path */
	unsigned features;	/* RTL_FEATURE_* mask */
	u8 default_ver;		/* fallback ver for rtl8169_get_mac_version() */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6161
/*
 * Try to enable MSI when the configuration allows it.  Returns
 * RTL_FEATURE_MSI on success, 0 when staying on (or falling back to)
 * INTx.  On the old 8169 family (mac version <= 06) the MSIEnable bit
 * in Config2 is rewritten to match the outcome.
 *
 * Cfg9346_Unlock assumed (caller must have unlocked the config regs).
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6183
/*
 * PCI .probe handler: allocate the netdev, map the register window,
 * identify the chip, read the MAC address, wire up the per-chip ops and
 * register the net device.  Failure paths unwind in reverse order via
 * the goto ladder at the bottom.
 */
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* MII glue for the generic mii ioctl helpers. */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	/* Memory-Write-Invalidate is a nice-to-have; failure is harmless. */
	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	tp->cp_cmd = RxChkSum;

	/* Prefer 64 bit DMA when available and requested via use_dac. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_free_res_3;
		}
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	/* Quiesce the chip before enabling bus mastering. */
	rtl_init_rxcfg(tp);

	rtl_irq_disable(tp);

	rtl_hw_reset(tp);

	rtl_ack_events(tp, 0xffff);

	pci_set_master(pdev);

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		tp->cp_cmd |= RxVlan;

	/* Select the mac_version specific op tables. */
	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);
	rtl_init_jumbo_ops(tp);
	rtl_init_csi_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/* Probe the WoL capabilities with the config registers unlocked. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(tp, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* TBI (fiber) vs. MII/GMII attachment selects the link helpers. */
	if (rtl_tbi_enabled(tp)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	mutex_init(&tp->wk.mutex);

	/* Get MAC address */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	tp->hw_start = cfg->hw_start;
	tp->event_slow = cfg->event_slow;

	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
		~(RxBOVF | RxFOVF) : ~0;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_msi_4;

	pci_set_drvdata(pdev, dev);

	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
			   "tx checksumming: %s]\n",
			   rtl_chip_infos[chipset].jumbo_max,
			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_start(tp);
	}

	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);

	/* Allow runtime suspend for devices that can wake the system. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

err_out_msi_4:
	rtl_disable_msi(pdev, tp);
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
6427
/* PCI driver glue; rtl8169_pci_tbl is defined earlier in this file. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= __devexit_p(rtl_remove_one),
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};
6436
/* Module entry point: register the PCI driver. */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);
This page took 0.35479 seconds and 5 git commands to generate.