Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[deliverable/linux.git] / drivers / net / ethernet / realtek / r8169.c
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
51
52 #ifdef RTL8169_DEBUG
53 #define assert(expr) \
54 if (!(expr)) { \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
57 }
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
60 #else
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
64
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
67
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
70
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
74
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
78
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
82
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
89
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
92
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
100
101 enum mac_version {
102 RTL_GIGA_MAC_VER_01 = 0,
103 RTL_GIGA_MAC_VER_02,
104 RTL_GIGA_MAC_VER_03,
105 RTL_GIGA_MAC_VER_04,
106 RTL_GIGA_MAC_VER_05,
107 RTL_GIGA_MAC_VER_06,
108 RTL_GIGA_MAC_VER_07,
109 RTL_GIGA_MAC_VER_08,
110 RTL_GIGA_MAC_VER_09,
111 RTL_GIGA_MAC_VER_10,
112 RTL_GIGA_MAC_VER_11,
113 RTL_GIGA_MAC_VER_12,
114 RTL_GIGA_MAC_VER_13,
115 RTL_GIGA_MAC_VER_14,
116 RTL_GIGA_MAC_VER_15,
117 RTL_GIGA_MAC_VER_16,
118 RTL_GIGA_MAC_VER_17,
119 RTL_GIGA_MAC_VER_18,
120 RTL_GIGA_MAC_VER_19,
121 RTL_GIGA_MAC_VER_20,
122 RTL_GIGA_MAC_VER_21,
123 RTL_GIGA_MAC_VER_22,
124 RTL_GIGA_MAC_VER_23,
125 RTL_GIGA_MAC_VER_24,
126 RTL_GIGA_MAC_VER_25,
127 RTL_GIGA_MAC_VER_26,
128 RTL_GIGA_MAC_VER_27,
129 RTL_GIGA_MAC_VER_28,
130 RTL_GIGA_MAC_VER_29,
131 RTL_GIGA_MAC_VER_30,
132 RTL_GIGA_MAC_VER_31,
133 RTL_GIGA_MAC_VER_32,
134 RTL_GIGA_MAC_VER_33,
135 RTL_GIGA_MAC_VER_34,
136 RTL_GIGA_MAC_VER_35,
137 RTL_GIGA_MAC_VER_36,
138 RTL_GIGA_MAC_VER_37,
139 RTL_GIGA_MAC_VER_38,
140 RTL_GIGA_MAC_VER_39,
141 RTL_GIGA_MAC_VER_40,
142 RTL_GIGA_MAC_VER_41,
143 RTL_GIGA_MAC_NONE = 0xff,
144 };
145
146 enum rtl_tx_desc_version {
147 RTL_TD_0 = 0,
148 RTL_TD_1 = 1,
149 };
150
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
156
157 #define _R(NAME,TD,FW,SZ,B) { \
158 .name = NAME, \
159 .txd_version = TD, \
160 .fw_name = FW, \
161 .jumbo_max = SZ, \
162 .jumbo_tx_csum = B \
163 }
164
165 static const struct {
166 const char *name;
167 enum rtl_tx_desc_version txd_version;
168 const char *fw_name;
169 u16 jumbo_max;
170 bool jumbo_tx_csum;
171 } rtl_chip_infos[] = {
172 /* PCI devices. */
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
185 /* PCI-E devices. */
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
224 JUMBO_9K, false),
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
227 JUMBO_9K, false),
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
234 JUMBO_1K, true),
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
237 JUMBO_1K, true),
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
242 JUMBO_9K, false),
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
245 JUMBO_9K, false),
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
248 JUMBO_9K, false),
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
254 JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
257 JUMBO_1K, true),
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
260 JUMBO_9K, false),
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
263 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
269 };
270 #undef _R
271
272 enum cfg_version {
273 RTL_CFG_0 = 0x00,
274 RTL_CFG_1,
275 RTL_CFG_2
276 };
277
/*
 * PCI IDs served by this driver.  The final column selects one of the
 * RTL_CFG_* configurations used at probe time.
 */
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
	/* D-Link DGE-528T: matched on subsystem IDs as well */
	{ PCI_VENDOR_ID_DLINK,			0x4300,
		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
	/* Linksys EG1032, rev 3: any subsystem vendor, subdevice 0x0024 */
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{0,},
};
296
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
298
299 static int rx_buf_sz = 16383;
300 static int use_dac;
301 static struct {
302 u32 msg_enable;
303 } debug = { -1 };
304
305 enum rtl_registers {
306 MAC0 = 0, /* Ethernet hardware address. */
307 MAC4 = 4,
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
315 FLASH = 0x30,
316 ERSR = 0x36,
317 ChipCmd = 0x37,
318 TxPoll = 0x38,
319 IntrMask = 0x3c,
320 IntrStatus = 0x3e,
321
322 TxConfig = 0x40,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
325
326 RxConfig = 0x44,
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
335
336 RxMissed = 0x4c,
337 Cfg9346 = 0x50,
338 Config0 = 0x51,
339 Config1 = 0x52,
340 Config2 = 0x53,
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
342
343 Config3 = 0x54,
344 Config4 = 0x55,
345 Config5 = 0x56,
346 MultiIntr = 0x5c,
347 PHYAR = 0x60,
348 PHYstatus = 0x6c,
349 RxMaxSize = 0xda,
350 CPlusCmd = 0xe0,
351 IntrMitigate = 0xe2,
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
355
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
357
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
359
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
362
363 FuncEvent = 0xf0,
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
367 };
368
369 enum rtl8110_registers {
370 TBICSR = 0x64,
371 TBI_ANAR = 0x68,
372 TBI_LPAR = 0x6a,
373 };
374
375 enum rtl8168_8101_registers {
376 CSIDR = 0x64,
377 CSIAR = 0x68,
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
386 PMCH = 0x6f,
387 EPHYAR = 0x80,
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
393 DLLPR = 0xd0,
394 #define PFM_EN (1 << 6)
395 DBG_REG = 0xd1,
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
398 TWSI = 0xd2,
399 MCU = 0xd3,
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
407 EFUSEAR = 0xdc,
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
414 };
415
416 enum rtl8168_registers {
417 LED_FREQ = 0x1a,
418 EEE_LED = 0x1b,
419 ERIDR = 0x70,
420 ERIAR = 0x74,
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
441 OCPAR = 0xb4,
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
445 GPHY_OCP = 0xb8,
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
453 #define FORCE_CLK (1 << 15) /* force clock request */
454 };
455
456 enum rtl_register_content {
457 /* InterruptStatusBits */
458 SYSErr = 0x8000,
459 PCSTimeout = 0x4000,
460 SWInt = 0x0100,
461 TxDescUnavail = 0x0080,
462 RxFIFOOver = 0x0040,
463 LinkChg = 0x0020,
464 RxOverflow = 0x0010,
465 TxErr = 0x0008,
466 TxOK = 0x0004,
467 RxErr = 0x0002,
468 RxOK = 0x0001,
469
470 /* RxStatusDesc */
471 RxBOVF = (1 << 24),
472 RxFOVF = (1 << 23),
473 RxRWT = (1 << 22),
474 RxRES = (1 << 21),
475 RxRUNT = (1 << 20),
476 RxCRC = (1 << 19),
477
478 /* ChipCmdBits */
479 StopReq = 0x80,
480 CmdReset = 0x10,
481 CmdRxEnb = 0x08,
482 CmdTxEnb = 0x04,
483 RxBufEmpty = 0x01,
484
485 /* TXPoll register p.5 */
486 HPQ = 0x80, /* Poll cmd on the high prio queue */
487 NPQ = 0x40, /* Poll cmd on the low prio queue */
488 FSWInt = 0x01, /* Forced software interrupt */
489
490 /* Cfg9346Bits */
491 Cfg9346_Lock = 0x00,
492 Cfg9346_Unlock = 0xc0,
493
494 /* rx_mode_bits */
495 AcceptErr = 0x20,
496 AcceptRunt = 0x10,
497 AcceptBroadcast = 0x08,
498 AcceptMulticast = 0x04,
499 AcceptMyPhys = 0x02,
500 AcceptAllPhys = 0x01,
501 #define RX_CONFIG_ACCEPT_MASK 0x3f
502
503 /* TxConfigBits */
504 TxInterFrameGapShift = 24,
505 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
506
507 /* Config1 register p.24 */
508 LEDS1 = (1 << 7),
509 LEDS0 = (1 << 6),
510 Speed_down = (1 << 4),
511 MEMMAP = (1 << 3),
512 IOMAP = (1 << 2),
513 VPD = (1 << 1),
514 PMEnable = (1 << 0), /* Power Management Enable */
515
516 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
519 PCI_Clock_66MHz = 0x01,
520 PCI_Clock_33MHz = 0x00,
521
522 /* Config3 register p.25 */
523 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
524 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
525 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
526 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
527
528 /* Config4 register */
529 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
530
531 /* Config5 register p.27 */
532 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
533 MWF = (1 << 5), /* Accept Multicast wakeup frame */
534 UWF = (1 << 4), /* Accept Unicast wakeup frame */
535 Spi_en = (1 << 3),
536 LanWake = (1 << 1), /* LanWake enable/disable */
537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
539
540 /* TBICSR p.28 */
541 TBIReset = 0x80000000,
542 TBILoopback = 0x40000000,
543 TBINwEnable = 0x20000000,
544 TBINwRestart = 0x10000000,
545 TBILinkOk = 0x02000000,
546 TBINwComplete = 0x01000000,
547
548 /* CPlusCmd p.31 */
549 EnableBist = (1 << 15), // 8168 8101
550 Mac_dbgo_oe = (1 << 14), // 8168 8101
551 Normal_mode = (1 << 13), // unused
552 Force_half_dup = (1 << 12), // 8168 8101
553 Force_rxflow_en = (1 << 11), // 8168 8101
554 Force_txflow_en = (1 << 10), // 8168 8101
555 Cxpl_dbg_sel = (1 << 9), // 8168 8101
556 ASF = (1 << 8), // 8168 8101
557 PktCntrDisable = (1 << 7), // 8168 8101
558 Mac_dbgo_sel = 0x001c, // 8168
559 RxVlan = (1 << 6),
560 RxChkSum = (1 << 5),
561 PCIDAC = (1 << 4),
562 PCIMulRW = (1 << 3),
563 INTT_0 = 0x0000, // 8168
564 INTT_1 = 0x0001, // 8168
565 INTT_2 = 0x0002, // 8168
566 INTT_3 = 0x0003, // 8168
567
568 /* rtl8169_PHYstatus */
569 TBI_Enable = 0x80,
570 TxFlowCtrl = 0x40,
571 RxFlowCtrl = 0x20,
572 _1000bpsF = 0x10,
573 _100bps = 0x08,
574 _10bps = 0x04,
575 LinkStatus = 0x02,
576 FullDup = 0x01,
577
578 /* _TBICSRBit */
579 TBILinkOK = 0x02000000,
580
581 /* DumpCounterCommand */
582 CounterDump = 0x8,
583 };
584
585 enum rtl_desc_bit {
586 /* First doubleword. */
587 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
588 RingEnd = (1 << 30), /* End of descriptor ring */
589 FirstFrag = (1 << 29), /* First segment of a packet */
590 LastFrag = (1 << 28), /* Final segment of a packet */
591 };
592
593 /* Generic case. */
594 enum rtl_tx_desc_bit {
595 /* First doubleword. */
596 TD_LSO = (1 << 27), /* Large Send Offload */
597 #define TD_MSS_MAX 0x07ffu /* MSS value */
598
599 /* Second doubleword. */
600 TxVlanTag = (1 << 17), /* Add VLAN tag */
601 };
602
603 /* 8169, 8168b and 810x except 8102e. */
604 enum rtl_tx_desc_bit_0 {
605 /* First doubleword. */
606 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
607 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
608 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
609 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
610 };
611
612 /* 8102e, 8168c and beyond. */
613 enum rtl_tx_desc_bit_1 {
614 /* Second doubleword. */
615 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
616 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
617 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
618 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
619 };
620
621 static const struct rtl_tx_desc_info {
622 struct {
623 u32 udp;
624 u32 tcp;
625 } checksum;
626 u16 mss_shift;
627 u16 opts_offset;
628 } tx_desc_info [] = {
629 [RTL_TD_0] = {
630 .checksum = {
631 .udp = TD0_IP_CS | TD0_UDP_CS,
632 .tcp = TD0_IP_CS | TD0_TCP_CS
633 },
634 .mss_shift = TD0_MSS_SHIFT,
635 .opts_offset = 0
636 },
637 [RTL_TD_1] = {
638 .checksum = {
639 .udp = TD1_IP_CS | TD1_UDP_CS,
640 .tcp = TD1_IP_CS | TD1_TCP_CS
641 },
642 .mss_shift = TD1_MSS_SHIFT,
643 .opts_offset = 1
644 }
645 };
646
647 enum rtl_rx_desc_bit {
648 /* Rx private */
649 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
650 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
651
652 #define RxProtoUDP (PID1)
653 #define RxProtoTCP (PID0)
654 #define RxProtoIP (PID1 | PID0)
655 #define RxProtoMask RxProtoIP
656
657 IPFail = (1 << 16), /* IP checksum failed */
658 UDPFail = (1 << 15), /* UDP/IP checksum failed */
659 TCPFail = (1 << 14), /* TCP/IP checksum failed */
660 RxVlanTag = (1 << 16), /* VLAN tag available */
661 };
662
663 #define RsvdMask 0x3fffc000
664
/*
 * Hardware Tx descriptor.  Fields are little-endian as seen by the NIC;
 * opts1 carries ownership/fragment/checksum bits (see rtl_tx_desc_bit*),
 * opts2 carries the VLAN tag, addr the DMA address of the buffer.
 */
struct TxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

/*
 * Hardware Rx descriptor.  Same layout as TxDesc; opts1 bits are
 * described by rtl_rx_desc_bit and enum rtl_register_content.
 */
struct RxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

/*
 * Per-Tx-slot bookkeeping kept alongside the descriptor ring.
 * Padded so the struct size is a multiple of the pointer size.
 */
struct ring_info {
	struct sk_buff *skb;
	u32 len;
	u8 __pad[sizeof(void *) - sizeof(u32)];
};
682
/* Driver feature flags kept in tp->features. */
enum features {
	RTL_FEATURE_WOL = (1 << 0),
	RTL_FEATURE_MSI = (1 << 1),
	RTL_FEATURE_GMII = (1 << 2),
	RTL_FEATURE_FW_LOADED = (1 << 3),
};

/*
 * Hardware statistics block.  Field order and widths are fixed by the
 * chip (presumably dumped via the CounterAddrLow/High registers — confirm
 * against the counter-dump path); all fields are little-endian.
 */
struct rtl8169_counters {
	__le64 tx_packets;
	__le64 rx_packets;
	__le64 tx_errors;
	__le32 rx_errors;
	__le16 rx_missed;
	__le16 align_errors;
	__le32 tx_one_collision;
	__le32 tx_multi_collision;
	__le64 rx_unicast;
	__le64 rx_broadcast;
	__le32 rx_multicast;
	__le16 tx_aborted;
	__le16 tx_underun;	/* sic: hardware-defined name */
};

/* Bit numbers for the tp->wk.flags deferred-work bitmap. */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,
	RTL_FLAG_TASK_SLOW_PENDING,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_TASK_PHY_PENDING,
	RTL_FLAG_MAX
};

/*
 * Software packet/byte counters; syncp allows consistent 64-bit reads
 * (relevant on 32-bit hosts).
 */
struct rtl8169_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
719
720 struct rtl8169_private {
721 void __iomem *mmio_addr; /* memory map physical address */
722 struct pci_dev *pci_dev;
723 struct net_device *dev;
724 struct napi_struct napi;
725 u32 msg_enable;
726 u16 txd_version;
727 u16 mac_version;
728 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
730 u32 dirty_tx;
731 struct rtl8169_stats rx_stats;
732 struct rtl8169_stats tx_stats;
733 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
734 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
735 dma_addr_t TxPhyAddr;
736 dma_addr_t RxPhyAddr;
737 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
738 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
739 struct timer_list timer;
740 u16 cp_cmd;
741
742 u16 event_slow;
743
744 struct mdio_ops {
745 void (*write)(struct rtl8169_private *, int, int);
746 int (*read)(struct rtl8169_private *, int);
747 } mdio_ops;
748
749 struct pll_power_ops {
750 void (*down)(struct rtl8169_private *);
751 void (*up)(struct rtl8169_private *);
752 } pll_power_ops;
753
754 struct jumbo_ops {
755 void (*enable)(struct rtl8169_private *);
756 void (*disable)(struct rtl8169_private *);
757 } jumbo_ops;
758
759 struct csi_ops {
760 void (*write)(struct rtl8169_private *, int, int);
761 u32 (*read)(struct rtl8169_private *, int);
762 } csi_ops;
763
764 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
765 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
766 void (*phy_reset_enable)(struct rtl8169_private *tp);
767 void (*hw_start)(struct net_device *);
768 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
769 unsigned int (*link_ok)(void __iomem *);
770 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
771
772 struct {
773 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
774 struct mutex mutex;
775 struct work_struct work;
776 } wk;
777
778 unsigned features;
779
780 struct mii_if_info mii;
781 struct rtl8169_counters counters;
782 u32 saved_wolopts;
783 u32 opts1_mask;
784
785 struct rtl_fw {
786 const struct firmware *fw;
787
788 #define RTL_VER_SIZE 32
789
790 char version[RTL_VER_SIZE];
791
792 struct rtl_fw_phy_action {
793 __le32 *code;
794 size_t size;
795 } phy_action;
796 } *rtl_fw;
797 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
798
799 u32 ocp_base;
800 };
801
802 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
803 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
804 module_param(use_dac, int, 0);
805 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
806 module_param_named(debug, debug.msg_enable, int, 0);
807 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
808 MODULE_LICENSE("GPL");
809 MODULE_VERSION(RTL8169_VERSION);
810 MODULE_FIRMWARE(FIRMWARE_8168D_1);
811 MODULE_FIRMWARE(FIRMWARE_8168D_2);
812 MODULE_FIRMWARE(FIRMWARE_8168E_1);
813 MODULE_FIRMWARE(FIRMWARE_8168E_2);
814 MODULE_FIRMWARE(FIRMWARE_8168E_3);
815 MODULE_FIRMWARE(FIRMWARE_8105E_1);
816 MODULE_FIRMWARE(FIRMWARE_8168F_1);
817 MODULE_FIRMWARE(FIRMWARE_8168F_2);
818 MODULE_FIRMWARE(FIRMWARE_8402_1);
819 MODULE_FIRMWARE(FIRMWARE_8411_1);
820 MODULE_FIRMWARE(FIRMWARE_8106E_1);
821 MODULE_FIRMWARE(FIRMWARE_8168G_1);
822
/* Serialize against the deferred-work state in tp->wk. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}

static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
832
/*
 * Replace the PCIe Max_Read_Request_Size field in the Device Control
 * register with @force (a PCI_EXP_DEVCTL_READRQ-field value).
 */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
838
/*
 * Polled hardware condition: check() samples the condition, msg names it
 * in the timeout error message.  Instances come from DECLARE_RTL_COND().
 */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};

/* Wrapper so udelay() can be passed as a function pointer to rtl_loop_wait(). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
848
849 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
850 void (*delay)(unsigned int), unsigned int d, int n,
851 bool high)
852 {
853 int i;
854
855 for (i = 0; i < n; i++) {
856 delay(d);
857 if (c->check(tp) == high)
858 return true;
859 }
860 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
861 c->msg, !high, n, d);
862 return false;
863 }
864
/* Busy-wait (udelay) until the condition reads true. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}

/* Busy-wait (udelay) until the condition reads false. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}

/* Sleep (msleep) until the condition reads true. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}

/* Sleep (msleep) until the condition reads false. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
892
/*
 * Declare a named rtl_cond plus the prototype of its check function;
 * the macro invocation is followed directly by the check body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
902
/* OCPAR busy flag: set while an OCP register access is in flight. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
909
/*
 * Read a dword from the OCP (on-chip processor) space.
 * @mask goes into OCPAR bits 15:12 (appears to be a byte-enable nibble —
 * confirm against the OCPAR layout), @reg is the 12-bit OCP address.
 * Returns the OCPDR value, or all-ones on timeout.
 */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(OCPDR) : ~0;
}
919
/*
 * Write @data to OCP register @reg.  The data must be latched into OCPDR
 * before OCPAR is armed with OCPAR_FLAG; completion is signalled by the
 * flag clearing.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
929
/* ERIAR busy flag: set while an ERI register access is in flight. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
936
/*
 * Send an out-of-band command to the 8168dp management firmware:
 * @cmd is pushed through ERIDR/ERIAR (the 0x800010e8 value is an
 * undocumented magic write command), then the firmware is kicked via
 * OCP register 0x30.  Silently gives up if the ERI write times out.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}

/* OOB commands understood by the DASH firmware. */
#define OOB_CMD_RESET		0x00
#define OOB_CMD_DRIVER_START	0x05
#define OOB_CMD_DRIVER_STOP	0x06
954
955 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
956 {
957 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
958 }
959
960 DECLARE_RTL_COND(rtl_ocp_read_cond)
961 {
962 u16 reg;
963
964 reg = rtl8168_get_ocp_reg(tp);
965
966 return ocp_read(tp, 0x0f, reg) & 0x00000800;
967 }
968
/* Tell the DASH firmware the driver is taking over, then wait for ack. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}

/* Tell the DASH firmware the driver is releasing the NIC, wait for ack. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
982
983 static int r8168dp_check_dash(struct rtl8169_private *tp)
984 {
985 u16 reg = rtl8168_get_ocp_reg(tp);
986
987 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
988 }
989
990 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
991 {
992 if (reg & 0xffff0001) {
993 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
994 return true;
995 }
996 return false;
997 }
998
/* GPHY_OCP busy flag (same bit position as OCPAR_FLAG). */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1005
/*
 * Write a 16-bit value to a PHY register through the GPHY OCP window
 * (8168g family).  The register address occupies bits 30:16 (reg << 15
 * since OCP addresses are word-aligned), the data the low 16 bits.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1017
/*
 * Read a PHY register through the GPHY OCP window (8168g family).
 * Returns the 16-bit register value, 0 for an invalid address, or
 * 0xffff (~0 truncated to u16) on timeout.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1030
/* Read-modify-write a PHY OCP register: set bits @p, then clear bits @m. */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	int cur = r8168_phy_ocp_read(tp, reg);

	r8168_phy_ocp_write(tp, reg, (cur | p) & ~m);
}
1038
/*
 * Write a MAC OCP register (8168g family) via OCPDR.  Unlike the GPHY
 * path there is no completion flag to poll.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}

/*
 * Read a MAC OCP register (8168g family).  Returns 0 for an invalid
 * address; the 32-bit OCPDR read is implicitly truncated to the low
 * 16 data bits by the u16 return type.
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1060
/* OCP base address of the standard MII register page on 8168g. */
#define OCP_STD_PHY_BASE	0xa400

/*
 * MII-style MDIO write emulated over PHY OCP (8168g).  A write to
 * register 0x1f is the classic page-select: it switches tp->ocp_base
 * (value 0 returns to the standard page).  On non-standard pages the
 * register number is offset by 0x10; registers are 2 bytes apart in
 * OCP space, hence reg * 2.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}

/* MII-style MDIO read emulated over PHY OCP (8168g); see write above. */
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}
1083
/* PHYAR busy flag (bit 31): set while an MDIO access is in flight. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1090
/*
 * Classic 8169 MDIO write via the PHYAR register: bit 31 set selects a
 * write, register in bits 20:16, data in bits 15:0.  Completion is
 * signalled by bit 31 dropping.
 */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1104
/*
 * Classic 8169 MDIO read via PHYAR: bit 31 clear selects a read.
 * Returns the 16-bit register value, or ~0 if completion (bit 31 set)
 * was not seen within 20 polls of 25us.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1123
/*
 * 8168DP (first flavour) MDIO access: stage command+data in OCPDR,
 * kick the transaction through OCPAR, clear EPHY_RXER_NUM, then wait
 * for the OCPAR busy flag to drop.  @data carries the read/write
 * command bit and, for writes, the value.
 */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1134
1135 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1136 {
1137 r8168dp_1_mdio_access(tp, reg,
1138 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1139 }
1140
/*
 * 8168DP (first flavour) MDIO read: issue the read command, give the
 * PHY 1ms to respond, then start the GPHY read cycle and poll for
 * completion.  Returns the 16-bit value or ~0 on timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1154
1155 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1156
1157 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1158 {
1159 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1160 }
1161
1162 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1163 {
1164 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1165 }
1166
1167 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1168 {
1169 void __iomem *ioaddr = tp->mmio_addr;
1170
1171 r8168dp_2_mdio_start(ioaddr);
1172
1173 r8169_mdio_write(tp, reg, value);
1174
1175 r8168dp_2_mdio_stop(ioaddr);
1176 }
1177
1178 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1179 {
1180 void __iomem *ioaddr = tp->mmio_addr;
1181 int value;
1182
1183 r8168dp_2_mdio_start(ioaddr);
1184
1185 value = r8169_mdio_read(tp, reg);
1186
1187 r8168dp_2_mdio_stop(ioaddr);
1188
1189 return value;
1190 }
1191
/* Write a PHY register through the chip-specific mdio_ops hook. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1196
/* Read a PHY register through the chip-specific mdio_ops hook. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1201
/* OR @value into a PHY register (read-modify-write, set only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1206
/*
 * Read-modify-write of a PHY register: set the bits in @p first, then
 * clear the bits in @m (clear wins where the two masks overlap).
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (cur | p) & ~m);
}
1214
/* mii_if_info write hook; @phy_id is ignored, there is a single PHY. */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	rtl_writephy(netdev_priv(dev), location, val);
}
1222
/* mii_if_info read hook; @phy_id is ignored, there is a single PHY. */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	return rtl_readphy(netdev_priv(dev), location);
}
1229
/* Condition helper: EPHYAR_FLAG is the EPHY access busy/completion bit. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1236
/*
 * Write an EPHY (PCIe PHY) register, then wait for the flag to drop and
 * add a 10us settle delay before the next command.
 */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1248
/*
 * Read an EPHY (PCIe PHY) register.  Returns the 16-bit data field,
 * or ~0 (0xffff after u16 truncation) on completion timeout.
 */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1258
/*
 * Write an ERI (extended register interface) location.  @addr must be
 * dword-aligned and @mask selects the byte lanes to update; both are
 * enforced by the BUG_ON.  Data goes through ERIDR, the command through
 * ERIAR, then we wait for the busy flag to drop.
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1270
/*
 * Read a full dword from an ERI location (all four byte lanes).
 * Returns ~0 on completion timeout.
 */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1280
/*
 * Read-modify-write of an ERI location: clear the bits in @m, then set
 * the bits in @p.
 *
 * NOTE(review): here set wins over clear ((val & ~m) | p), which is the
 * opposite order to rtl_w1w0_phy()/rtl_w1w0_phy_ocp() ((val | p) & ~m).
 * Callers never seem to pass overlapping masks, but confirm before
 * relying on either precedence.
 */
static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
			 u32 m, int type)
{
	u32 val;

	val = rtl_eri_read(tp, addr, type);
	rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
1289
/* One entry of an EXGMAC ERI write batch (see rtl_write_exgmac_batch). */
struct exgmac_reg {
	u16 addr;	/* ERI address, dword-aligned */
	u16 mask;	/* ERIAR byte-lane mask */
	u32 val;	/* value to write */
};
1295
1296 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1297 const struct exgmac_reg *r, int len)
1298 {
1299 while (len-- > 0) {
1300 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1301 r++;
1302 }
1303 }
1304
/* Condition helper: EFUSEAR_FLAG signals e-fuse read completion. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1311
/*
 * Read a byte from the 8168D e-fuse block.  Returns the data byte, or
 * ~0 (0xff after u8 truncation) if completion was not seen within
 * 300 polls of 100us.
 */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1321
/* Snapshot the raw interrupt status bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1328
/* Acknowledge (clear) the given interrupt status bits (write-1-to-clear). */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();	/* order the MMIO write before any following unlock */
}
1336
/* Mask all chip interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();	/* order the MMIO write before any following unlock */
}
1344
/* Unmask the given interrupt sources. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1351
1352 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1353 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1354 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1355
/* Unmask the NAPI rx/tx events plus the chip's slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1360
/*
 * Mask all interrupts and acknowledge anything pending.  The dummy
 * ChipCmd read flushes the posted MMIO writes to the device.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1369
/* Non-zero while a TBI reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1376
/* Non-zero while the PHY's self-clearing BMCR reset bit is still set. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1381
/* Non-zero when the TBI (fibre) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1386
/* Non-zero when the copper (XMII) link is up, per the PHY status byte. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1391
/* Kick off a TBI reset (the bit self-clears when done). */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1398
1399 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1400 {
1401 unsigned int val;
1402
1403 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1404 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1405 }
1406
/*
 * Apply chip-specific ERI fixups after a link change.  The register
 * addresses and values are vendor magic taken per MAC version; they are
 * keyed on the negotiated speed read back from PHYstatus.
 * Only runs while the interface is up.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1463
/*
 * Propagate the hardware link state to the net core.  When @pm is true
 * a link-up cancels any scheduled runtime suspend and a link-down
 * schedules one 5s out.  Note the ratelimit guards only the "link up"
 * message, not "link down".
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1483
/* Link-status check without any runtime-PM side effects. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1490
1491 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1492
/*
 * Translate the chip's Config1/3/5 wake bits into WAKE_* flags.
 * Returns 0 immediately if PME generation is disabled in Config1.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1519
/* ethtool .get_wol: report supported and currently enabled wake sources. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1531
/*
 * Program the chip's wake-on-LAN configuration from WAKE_* flags.
 * Config registers are only writable between Cfg9346 unlock/lock, so
 * the whole update is bracketed by that pair.  PME generation itself
 * lives in Config1 on old chips (<= VER_17) and Config2 on newer ones.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	static const struct {
		u32 opt;
		u16 reg;
		u8 mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* Set or clear each wake bit according to the requested options. */
	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1576
/*
 * ethtool .set_wol: record the WoL feature flag, program the chip, and
 * tell the PM core whether this device may wake the system.
 * Always returns 0.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
1595
/* Firmware file name for the detected MAC version (may be NULL). */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1600
/*
 * ethtool .get_drvinfo: driver name/version, PCI bus info, and - when a
 * firmware image has been successfully loaded - its version string.
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* tp->rtl_fw can be NULL or an ERR_PTR while no firmware is loaded */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1615
/* ethtool .get_regs_len: size of the register dump window. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1620
1621 static int rtl8169_set_speed_tbi(struct net_device *dev,
1622 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1623 {
1624 struct rtl8169_private *tp = netdev_priv(dev);
1625 void __iomem *ioaddr = tp->mmio_addr;
1626 int ret = 0;
1627 u32 reg;
1628
1629 reg = RTL_R32(TBICSR);
1630 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1631 (duplex == DUPLEX_FULL)) {
1632 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1633 } else if (autoneg == AUTONEG_ENABLE)
1634 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1635 else {
1636 netif_warn(tp, link, dev,
1637 "incorrect speed setting refused in TBI mode\n");
1638 ret = -EOPNOTSUPP;
1639 }
1640
1641 return ret;
1642 }
1643
/*
 * Speed setter for copper (XMII) mode.
 *
 * With autoneg enabled, builds the 10/100 advertisement word plus pause
 * capabilities, and the 1000 advertisement where the PHY supports GMII;
 * requesting gigabit on a fast-ethernet-only chip fails with -EINVAL.
 * With autoneg disabled, forces 10 or 100 Mbps (1000 cannot be forced)
 * and the requested duplex.
 *
 * Returns 0 on success, -EINVAL on an unsupported combination.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard MII registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* Vendor fixup for forced-100 on the two earliest gigabit MACs. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1722
/*
 * Dispatch to the TBI or XMII speed setter and, when gigabit
 * autonegotiation was requested on a running interface, arm the PHY
 * watchdog timer to re-check the negotiation result later.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
1740
/*
 * ethtool .set_settings: stop the PHY timer (it may be re-armed by
 * rtl8169_set_speed) and apply the requested link parameters under the
 * driver work lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1755
1756 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1757 netdev_features_t features)
1758 {
1759 struct rtl8169_private *tp = netdev_priv(dev);
1760
1761 if (dev->mtu > TD_MSS_MAX)
1762 features &= ~NETIF_F_ALL_TSO;
1763
1764 if (dev->mtu > JUMBO_1K &&
1765 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1766 features &= ~NETIF_F_IP_CSUM;
1767
1768 return features;
1769 }
1770
1771 static void __rtl8169_set_features(struct net_device *dev,
1772 netdev_features_t features)
1773 {
1774 struct rtl8169_private *tp = netdev_priv(dev);
1775 netdev_features_t changed = features ^ dev->features;
1776 void __iomem *ioaddr = tp->mmio_addr;
1777
1778 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1779 return;
1780
1781 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1782 if (features & NETIF_F_RXCSUM)
1783 tp->cp_cmd |= RxChkSum;
1784 else
1785 tp->cp_cmd &= ~RxChkSum;
1786
1787 if (dev->features & NETIF_F_HW_VLAN_RX)
1788 tp->cp_cmd |= RxVlan;
1789 else
1790 tp->cp_cmd &= ~RxVlan;
1791
1792 RTL_W16(CPlusCmd, tp->cp_cmd);
1793 RTL_R16(CPlusCmd);
1794 }
1795 if (changed & NETIF_F_RXALL) {
1796 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1797 if (features & NETIF_F_RXALL)
1798 tmp |= (AcceptErr | AcceptRunt);
1799 RTL_W32(RxConfig, tmp);
1800 }
1801 }
1802
/*
 * ndo_set_features: serialize the hardware feature update with the
 * driver's work lock.  Always returns 0.
 */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
1814
1815
/*
 * Build the tx descriptor opts2 VLAN field: TxVlanTag plus the
 * byte-swapped tag when one is present, 0 otherwise.  The hardware
 * expects the tag in big-endian order, hence the swab16.
 */
static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
	return (vlan_tx_tag_present(skb)) ?
		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
1821
/*
 * If the rx descriptor carries a stripped VLAN tag, hand it to the
 * stack (byte-swapped back from the hardware's big-endian layout).
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
}
1829
/*
 * ethtool get_settings for TBI (fibre) mode: always 1000/full over
 * fibre; only the autoneg flag is read back from TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1850
/* ethtool get_settings for copper mode, delegated to the mii library. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1857
/*
 * ethtool .get_settings: dispatch to the TBI or XMII getter under the
 * driver work lock.
 */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1869
/*
 * ethtool .get_regs: copy up to R8169_REGS_SIZE bytes of the MMIO
 * register window into @p, under the driver work lock.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
1882
/* ethtool .get_msglevel: current netif message-enable mask. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1889
/* ethtool .set_msglevel: replace the netif message-enable mask. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1896
/*
 * ethtool statistic names.  Order must match the data[] indices filled
 * in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1912
1913 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1914 {
1915 switch (sset) {
1916 case ETH_SS_STATS:
1917 return ARRAY_SIZE(rtl8169_gstrings);
1918 default:
1919 return -EOPNOTSUPP;
1920 }
1921 }
1922
/* Condition helper: CounterDump stays set while a tally dump is running. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & CounterDump;
}
1929
/*
 * Trigger a hardware tally-counter dump into a temporary DMA buffer and
 * fold the result into tp->counters.  Silently skipped when the
 * receiver is off or the DMA buffer cannot be allocated, and the old
 * counters are kept on dump timeout.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	/* Program the dump target address, then set CounterDump to start. */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
		memcpy(&tp->counters, counters, sizeof(*counters));

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1963
/*
 * ethtool .get_ethtool_stats: refresh the hardware tally counters and
 * export them in the order declared in rtl8169_gstrings[].
 * ("tx_underun" reproduces the spelling of the hardware counter field.)
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1987
1988 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1989 {
1990 switch(stringset) {
1991 case ETH_SS_STATS:
1992 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1993 break;
1994 }
1995 }
1996
/* ethtool operations exposed by this driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2013
/*
 * Identify the MAC version from TxConfig.  The table is ordered from
 * most- to least-specific mask and ends with a catch-all, so the scan
 * below always terminates; an unrecognized chip falls back to
 * @default_version for its family.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168G family. */
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
2125
/* Debug helper: log the detected MAC version (no-op unless RTL8169_DEBUG). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2130
/* One register/value pair of a PHY programming batch. */
struct phy_reg {
	u16 reg;	/* PHY register address */
	u16 val;	/* value to write */
};
2135
2136 static void rtl_writephy_batch(struct rtl8169_private *tp,
2137 const struct phy_reg *regs, int len)
2138 {
2139 while (len-- > 0) {
2140 rtl_writephy(tp, regs->reg, regs->val);
2141 regs++;
2142 }
2143 }
2144
2145 #define PHY_READ 0x00000000
2146 #define PHY_DATA_OR 0x10000000
2147 #define PHY_DATA_AND 0x20000000
2148 #define PHY_BJMPN 0x30000000
2149 #define PHY_READ_EFUSE 0x40000000
2150 #define PHY_READ_MAC_BYTE 0x50000000
2151 #define PHY_WRITE_MAC_BYTE 0x60000000
2152 #define PHY_CLEAR_READCOUNT 0x70000000
2153 #define PHY_WRITE 0x80000000
2154 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2155 #define PHY_COMP_EQ_SKIPN 0xa0000000
2156 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2157 #define PHY_WRITE_PREVIOUS 0xc0000000
2158 #define PHY_SKIPN 0xd0000000
2159 #define PHY_DELAY_MS 0xe0000000
2160 #define PHY_WRITE_ERI_WORD 0xf0000000
2161
/* Header of the "new" firmware image format (magic == 0 means old format). */
struct fw_info {
	u32	magic;			/* must be 0 for this layout */
	char	version[RTL_VER_SIZE];	/* human-readable version string */
	__le32	fw_start;		/* byte offset of the phy action code */
	__le32	fw_len;			/* number of 32-bit opcodes */
	u8	chksum;			/* makes the byte sum of the file 0 */
} __packed;
2169
2170 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2171
/*
 * Validate a loaded firmware blob and locate its phy action code.
 *
 * Two formats exist: the headered format (fw_info.magic == 0) carries a
 * whole-file checksum, a version string and explicit start/length
 * fields, all of which are bounds-checked against the blob size; the
 * legacy headerless format is raw opcodes, with the version taken from
 * the expected firmware file name.  On success pa->code/pa->size are
 * set and true is returned; any malformed input yields false.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* The byte sum of the whole file must be zero. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* Division avoids overflow in the size * opcode check. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2222
/*
 * Statically validate the firmware action words: only known opcodes are
 * accepted, and every jump/skip target must stay inside the action array,
 * so the interpreter in rtl_phy_write_fw() cannot run out of bounds.
 * Returns true if every action is acceptable.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Opcodes with no control-flow effect need no range check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump must not go before action 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May advance by 2; both targets must exist. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno actions past the next one. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		/* Unimplemented opcodes are rejected up front. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2281
2282 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2283 {
2284 struct net_device *dev = tp->dev;
2285 int rc = -EINVAL;
2286
2287 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2288 netif_err(tp, ifup, dev, "invalid firwmare\n");
2289 goto out;
2290 }
2291
2292 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2293 rc = 0;
2294 out:
2295 return rc;
2296 }
2297
/*
 * Execute the firmware action words against the PHY.  Interpreter state:
 * @predata holds the last value read (possibly modified by OR/AND) and
 * @count is a read counter tested by PHY_READCOUNT_EQ_SKIP.  The actions
 * were already validated by rtl_check_firmware(), so an unknown opcode
 * here is a driver bug (BUG()).
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		/* An all-zero action word terminates the program early. */
		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump by regno actions. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next action when count matches data. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			/* Write back the accumulated read value. */
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		/* Rejected by rtl_fw_data_ok() - cannot happen here. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			BUG();
		}
	}
}
2376
2377 static void rtl_release_firmware(struct rtl8169_private *tp)
2378 {
2379 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2380 release_firmware(tp->rtl_fw->fw);
2381 kfree(tp->rtl_fw);
2382 }
2383 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2384 }
2385
2386 static void rtl_apply_firmware(struct rtl8169_private *tp)
2387 {
2388 struct rtl_fw *rtl_fw = tp->rtl_fw;
2389
2390 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2391 if (!IS_ERR_OR_NULL(rtl_fw)) {
2392 rtl_phy_write_fw(tp, rtl_fw);
2393 tp->features |= RTL_FEATURE_FW_LOADED;
2394 }
2395 }
2396
2397 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2398 {
2399 if (rtl_readphy(tp, reg) != val)
2400 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2401 else
2402 rtl_apply_firmware(tp);
2403 }
2404
/*
 * Disable ALDPS power saving on 810x-class chips, then give the PHY
 * 100ms to settle before further configuration.
 */
static void r810x_aldps_disable(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);
}
2411
/*
 * Re-enable ALDPS on 810x-class chips.  Skipped unless PHY firmware was
 * successfully loaded (RTL_FEATURE_FW_LOADED).
 */
static void r810x_aldps_enable(struct rtl8169_private *tp)
{
	if (!(tp->features & RTL_FEATURE_FW_LOADED))
		return;

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x8310);
}
2420
/*
 * Enable ALDPS on 8168-class chips (variant 1).  Skipped unless PHY
 * firmware was successfully loaded (RTL_FEATURE_FW_LOADED).
 */
static void r8168_aldps_enable_1(struct rtl8169_private *tp)
{
	if (!(tp->features & RTL_FEATURE_FW_LOADED))
		return;

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
}
2429
/*
 * PHY initialisation for the 8169s.  The register/value pairs are
 * vendor-supplied magic; do not reorder or edit individual entries.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2496
/* PHY initialisation for the 8169sb (vendor magic values). */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2507
2508 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2509 {
2510 struct pci_dev *pdev = tp->pci_dev;
2511
2512 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2513 (pdev->subsystem_device != 0xe000))
2514 return;
2515
2516 rtl_writephy(tp, 0x1f, 0x0001);
2517 rtl_writephy(tp, 0x10, 0xf01b);
2518 rtl_writephy(tp, 0x1f, 0x0000);
2519 }
2520
/*
 * PHY initialisation for the 8169scd (vendor magic values), followed by
 * a board-specific quirk for one Gigabyte model.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2567
/* PHY initialisation for the 8169sce (vendor magic values). */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2620
/* PHY initialisation for the 8168bb (vendor magic values). */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2633
/* PHY initialisation for the 8168bef (vendor magic values). */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2644
/* PHY initialisation for the 8168cp rev 1 (vendor magic values). */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2657
/* PHY initialisation for the 8168cp rev 2 (vendor magic values). */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2672
/* PHY initialisation for the 8168c rev 1 (vendor magic values). */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2701
/* PHY initialisation for the 8168c rev 2 (vendor magic values). */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2729
/* PHY initialisation for the 8168c rev 3 (vendor magic values). */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2751
/* The 8168c rev 4 uses the same PHY initialisation as rev 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2756
/*
 * PHY initialisation for the 8168d rev 1.  Vendor magic values; the
 * efuse byte at 0x01 selects between two tuning branches, and the PHY
 * firmware is only applied when MII_EXPANSION reads back 0xbf00.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		if ((val & 0x00ff) != 0x006c) {
			/* Walk reg 0x0d's low byte through 0x65..0x6c. */
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2867
/*
 * PHY initialisation for the 8168d rev 2.  Same structure as rev 1 but
 * without the Rx-error regulator tweak; firmware is only applied when
 * MII_EXPANSION reads back 0xb300.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		if ((val & 0x00ff) != 0x006c) {
			/* Walk reg 0x0d's low byte through 0x65..0x6c. */
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2969
/* PHY initialisation for the 8168d rev 3 (vendor magic values). */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3030
/* PHY initialisation for the 8168d rev 4 (vendor magic values). */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
3046
/*
 * PHY initialisation for the 8168e rev 1: load firmware, then apply the
 * vendor tuning sequence.  The section comments come from Realtek.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3119
/*
 * Write the 6-byte MAC address @addr into the extended GigaMAC registers
 * (0xe0/0xe4 and, duplicated, 0xf0/0xf4) as little-endian 16-bit words.
 */
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
	const u16 w[] = {
		addr[0] | (addr[1] << 8),
		addr[2] | (addr[3] << 8),
		addr[4] | (addr[5] << 8)
	};
	const struct exgmac_reg e[] = {
		{ .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
		{ .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
		{ .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
		{ .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
	};

	rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
3136
/*
 * PHY initialisation for the 8168e rev 2: firmware load, vendor tuning,
 * EEE setup, ALDPS enable, then re-program the GigaMAC address registers
 * to work around BIOSes that leave them stale.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	r8168_aldps_enable_1(tp);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3225
/* Tuning sequence shared by the 8168f variants (and 8411). */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3247
/* PHY initialisation for the 8168f rev 1: firmware, vendor tables, ALDPS. */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	r8168_aldps_enable_1(tp);
}
3299
/* PHY initialisation for the 8168f rev 2: firmware, shared tuning, ALDPS. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	r8168_aldps_enable_1(tp);
}
3308
/*
 * PHY initialisation for the 8411: firmware load, 8168f shared tuning,
 * vendor tables, green-table tweaks, EEE setup and ALDPS enable.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	r8168_aldps_enable_1(tp);
}
3407
/*
 * PHY initialisation for the 8168g rev 1: upload a MAC OCP patch blob
 * for GPHY reset, load firmware, then tune via PHY OCP registers.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* Opaque vendor patch blob, written to MAC OCP space at 0xf800. */
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,

		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,

		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,

		0x0000, 0x0000, 0x0000, 0x0000
	};
	u32 i;

	/* Patch code for GPHY reset */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);

	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
	else
		/*
		 * NOTE(review): the if-branch sets a bit in 0xc41a but this
		 * branch clears one in 0xbcc4 - asymmetric, possibly a
		 * copy-paste slip; confirm against vendor reference code.
		 */
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
}
3456
/* PHY setup for the RTL8102E family (RTL_GIGA_MAC_VER_07..09). */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },	/* select PHY page 3 */
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }	/* back to page 0 */
	};

	/* Set individual quirk bits on page 0 before the batch write. */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3473
/*
 * PHY setup for RTL8105E (RTL_GIGA_MAC_VER_29/30): load the PHY ram code
 * with ALDPS disabled, apply register fixups, then re-enable ALDPS.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	r810x_aldps_disable(tp);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	r810x_aldps_enable(tp);
}
3499
/*
 * PHY setup for RTL8402 (RTL_GIGA_MAC_VER_37): firmware load with ALDPS
 * held off, followed by EEE configuration.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	r810x_aldps_disable(tp);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);

	r810x_aldps_enable(tp);
}
3516
/*
 * PHY setup for RTL8106E (RTL_GIGA_MAC_VER_39): firmware load with ALDPS
 * held off, then EEE-related ERI and PHY register writes.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	r810x_aldps_disable(tp);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	r810x_aldps_enable(tp);
}
3538
/*
 * Dispatch to the chip-specific PHY configuration routine based on the
 * detected mac_version.  Versions without quirks (VER_01, VER_31, VER_41
 * and anything unknown) intentionally do nothing.
 */
static void rtl_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_print_mac_version(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
		break;
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
		rtl8169s_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_04:
		rtl8169sb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_05:
		rtl8169scd_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_06:
		rtl8169sce_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
		rtl8102e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_11:
		rtl8168bb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_17:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_18:
		rtl8168cp_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_19:
		rtl8168c_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_20:
		rtl8168c_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_21:
		rtl8168c_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_22:
		rtl8168c_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		rtl8168cp_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_25:
		rtl8168d_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_26:
		rtl8168d_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_27:
		rtl8168d_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_28:
		rtl8168d_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		rtl8105e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_31:
		/* None. */
		break;
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl8168e_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl8168e_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_35:
		rtl8168f_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_36:
		rtl8168f_2_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl8402_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl8411_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl8106e_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
		rtl8168g_1_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_41:
	default:
		break;
	}
}
3648
/*
 * PHY watchdog, run from the driver workqueue: while the link is down,
 * repeatedly reset the PHY and re-arm the timer until auto-negotiation
 * brings the link up (at which point we simply stop rescheduling).
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	/* VER_01 (TBI) chips never schedule this work. */
	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		return;

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3676
/*
 * Queue the driver's work item for @flag, unless that flag is already
 * pending (test_and_set_bit makes the schedule idempotent per flag).
 */
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
	if (!test_and_set_bit(flag, tp->wk.flags))
		schedule_work(&tp->wk.work);
}
3682
/* Timer callback: defer the actual PHY work to process context. */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
3690
/*
 * Undo the probe-time board setup in reverse order: unmap MMIO, release
 * the PCI regions, clear MWI, disable the device and free the netdev.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3700
/* Wait-loop condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3705
/* Trigger a PHY reset and poll (up to 100 x 1ms) for it to complete. */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3712
/*
 * True when the chip is the original 8169 (VER_01) and its ten-bit
 * interface (TBI, i.e. fiber) is enabled in the PHY status register.
 */
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
	    (RTL_R8(PHYstatus) & TBI_Enable);
}
3720
/*
 * Full PHY bring-up: apply chip-specific config, a few undocumented MAC
 * register pokes for old chips, reset the PHY and start auto-negotiation
 * advertising all supported speeds.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 10/100 always; 1000 only when the PHY supports GMII. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3756
/*
 * Program the unicast MAC address into the MAC0/MAC4 registers.  The
 * config-register lock (Cfg9346) must be opened around the writes; the
 * reads after each write appear to flush the posted MMIO write - TODO
 * confirm the posting rationale.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* High 2 bytes first, then the low 4 bytes. */
	RTL_W32(MAC4, addr[4] | addr[5] << 8);
	RTL_R32(MAC4);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(MAC0);

	/* VER_34 additionally keeps a copy in ERI/EXGMAC space. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3778
3779 static int rtl_set_mac_address(struct net_device *dev, void *p)
3780 {
3781 struct rtl8169_private *tp = netdev_priv(dev);
3782 struct sockaddr *addr = p;
3783
3784 if (!is_valid_ether_addr(addr->sa_data))
3785 return -EADDRNOTAVAIL;
3786
3787 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3788
3789 rtl_rar_set(tp, dev->dev_addr);
3790
3791 return 0;
3792 }
3793
3794 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3795 {
3796 struct rtl8169_private *tp = netdev_priv(dev);
3797 struct mii_ioctl_data *data = if_mii(ifr);
3798
3799 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3800 }
3801
3802 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3803 struct mii_ioctl_data *data, int cmd)
3804 {
3805 switch (cmd) {
3806 case SIOCGMIIPHY:
3807 data->phy_id = 32; /* Internal PHY */
3808 return 0;
3809
3810 case SIOCGMIIREG:
3811 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3812 return 0;
3813
3814 case SIOCSMIIREG:
3815 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3816 return 0;
3817 }
3818 return -EOPNOTSUPP;
3819 }
3820
/* TBI (fiber) chips have no MII to talk to: reject every MII ioctl. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3825
3826 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3827 {
3828 if (tp->features & RTL_FEATURE_MSI) {
3829 pci_disable_msi(pdev);
3830 tp->features &= ~RTL_FEATURE_MSI;
3831 }
3832 }
3833
/*
 * Select the MDIO accessors matching the chip generation: the 8168DP
 * variants and the 8168G family each need their own register protocol;
 * everything else uses the classic 8169 one.
 */
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->write	= r8168g_mdio_write;
		ops->read	= r8168g_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
3859
/*
 * On chips that need it, keep the receiver accepting broadcast/multicast/
 * unicast frames while suspended so Wake-on-LAN packets are still seen.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3884
/*
 * WoL-aware power down: if any wake source is armed, leave the PLL on,
 * restart PHY autoneg (BMCR = 0) and apply the suspend RX quirk.
 * Returns true when WoL handling was done (caller must then skip the
 * full power down), false when no wake source is enabled.
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, 0x0000);

	rtl_wol_suspend_quirk(tp);

	return true;
}
3897
/* Power the 810x PHY down via the BMCR power-down bit (page 0). */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3903
/* Power the 810x PHY back up and re-enable auto-negotiation (page 0). */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3909
/*
 * 810x-family PLL power down: skipped entirely when WoL keeps the PLL
 * alive; otherwise power the PHY down and, on chips that support it,
 * drop the PLL via the PMCH register bit.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* These chips have no PLL control bit. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3932
/*
 * 810x-family PLL power up: bring the PHY back first, then re-enable
 * the PLL on chips that expose the PMCH control bit.
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* These chips have no PLL control bit. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3952
/*
 * Power the 8168 PHY up: older generations additionally need PHY
 * register 0x0e cleared before auto-negotiation is re-enabled.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3979
/*
 * Power the 8168 PHY down.  VER_32/33 keep autoneg enabled alongside
 * power-down; older generations first write 0x0200 to PHY register 0x0e
 * and then deliberately fall through to the plain BMCR power-down.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - also set the BMCR power-down bit */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
4010
/*
 * 8168-family PLL power down.  Bails out early when the DASH management
 * firmware (8168DP) or ASF (VER_23/24) owns the chip, or when WoL needs
 * the PLL kept running.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	/* Undocumented ephy tweak needed before power down on VER_32/33. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4049
/*
 * 8168-family PLL power up: re-enable the PLL first (on chips with the
 * PMCH bit), then power the PHY back up.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
4068
/* Invoke an optional chip-specific hook; a NULL hook means "no action". */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4075
/* Power the PLL down via the chip-specific hook (no-op if unset). */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4080
/* Power the PLL up via the chip-specific hook (no-op if unset). */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4085
/*
 * Bind the PLL power up/down hooks for the detected chip: 810x family,
 * 8168 family, or none (older 8169 chips without PLL management).
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4137
/*
 * Initialize RxConfig with the DMA burst size and, per generation, the
 * FIFO threshold (old chips), early-interrupt and multi-fragment RX
 * enables (8168C..8168E), or just early-interrupt (everything newer).
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4174
4175 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4176 {
4177 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
4178 }
4179
/* Enable jumbo frames via the chip hook, inside a Cfg9346 unlock/lock. */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4188
/* Disable jumbo frames via the chip hook, inside a Cfg9346 unlock/lock. */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4197
/* 8168C jumbo on: set both jumbo enables, shrink PCIe max read request. */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4206
/* 8168C jumbo off: clear both enables, restore PCIe max read request. */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4215
/* 8168DP jumbo on: only the Config3 enable bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
4222
/* 8168DP jumbo off: clear the Config3 enable bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
4229
/*
 * 8168E jumbo on: raise the max TX packet size, set the enable bits and
 * shrink the PCIe max read request size.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4239
/* 8168E jumbo off: restore standard TX size, enables and read request. */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4249
/* 8168B rev0 jumbo on: PCIe tweak only (no MAC register bits). */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4255
/* 8168B rev0 jumbo off: restore the default PCIe read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4261
/* 8168B rev1 jumbo on: rev0 handling plus a Config4 enable bit. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
4270
/* 8168B rev1 jumbo off: rev0 handling plus clearing the Config4 bit. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4279
/*
 * Bind the jumbo-frame enable/disable hooks for the detected chip;
 * NULL hooks mean jumbo frames need no action or are unsupported.
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
4331
/* Wait-loop condition: true while the chip's soft reset bit is still set. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4338
/* Issue a chip soft reset and poll (100 x 100us) for it to self-clear. */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4347
/*
 * Load and validate the chip's firmware patch, caching the result in
 * tp->rtl_fw.  Uses a goto cleanup ladder: each failure label releases
 * exactly the resources acquired so far.  Failure is non-fatal - the
 * driver runs without the patch and tp->rtl_fw is left NULL.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	/* NULL name means this chip needs no firmware - not an error. */
	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
4385
/*
 * Fetch the firmware once: tp->rtl_fw starts life as an ERR_PTR sentinel
 * and is replaced by the real blob (or NULL) on the first call.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
4391
/* Stop the receiver by clearing all packet-accept bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4398
/* Wait-loop condition: true while the normal-priority TX queue is busy. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4405
/* Wait-loop condition: true once the TX FIFO reports empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4412
/*
 * Quiesce and reset the chip: mask interrupts, stop RX, wait (in a
 * chip-specific way) for in-flight TX DMA to drain, then soft reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* 8168DP: wait for the NPQ poll bit to clear. */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Newer chips: request stop and poll for an empty TX FIFO. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		/* Everything else: request stop and allow a fixed settle time. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4442
/* Program TxConfig with the DMA burst size and interframe gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4451
/* Run the chip-specific hardware start routine, then unmask interrupts. */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4460
/*
 * Tell the chip where the TX/RX descriptor rings live (64-bit DMA
 * addresses split into high/low 32-bit register writes).
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4474
/*
 * Read CPlusCmd and write the same value straight back, returning it.
 * The write-back appears intentional (read-modify-write priming of the
 * register) - TODO confirm against chip documentation.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4483
/*
 * Set the hardware RX size filter just above the buffer size so it
 * effectively never rejects frames the buffers can hold.
 */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4489
/*
 * Write an undocumented per-chip value to register 0x7c, selected by
 * mac_version and the detected PCI bus clock (33 vs 66 MHz).  Only the
 * 8110SCd/SCe (VER_05/06) have entries; other chips are untouched.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
	static const struct rtl_cfg2_info {
		u32 mac_version;
		u32 clk;
		u32 val;
	} cfg2_info [] = {
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
	};
	const struct rtl_cfg2_info *p = cfg2_info;
	unsigned int i;
	u32 clk;

	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
		if ((p->mac_version == mac_version) && (p->clk == clk)) {
			RTL_W32(0x7c, p->val);
			break;
		}
	}
}
4514
/*
 * .ndo_set_rx_mode handler: compute the RX accept mask and the 64-bit
 * multicast hash filter from the netdev flags and multicast list, then
 * program MAR0/MAR4 and RxConfig.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 CRC bits select one of 64 hash filter bits. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/*
	 * Chips newer than VER_06 lay the hash registers out byte-swapped
	 * and word-swapped relative to the old ones, hence the swab32 +
	 * word exchange - presumably a hardware layout change; verify
	 * against chip documentation.
	 */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* VER_35 quirk: force accept-all-multicast regardless of the hash. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4567
/*
 * Hardware start for the original 8169 family (VER_01..06): RX/TX config,
 * C+ command mode, descriptor ring addresses, interrupt mitigation and
 * receive mode.  Note the chip-revision-dependent ordering of enabling
 * TX/RX versus writing the config registers.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* VER_01..04 must have TX/RX enabled before the config writes. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* Later chips enable TX/RX after the config writes instead. */
	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4639
4640 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4641 {
4642 if (tp->csi_ops.write)
4643 tp->csi_ops.write(tp, addr, value);
4644 }
4645
4646 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4647 {
4648 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4649 }
4650
4651 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4652 {
4653 u32 csi;
4654
4655 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4656 rtl_csi_write(tp, 0x070c, csi | bits);
4657 }
4658
/* Set CSI 0x070c high byte to 0x17 (chip-specific access enable value). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
4663
/* Set CSI 0x070c high byte to 0x27 (chip-specific access enable value). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4668
/* Poll condition: CSIAR_FLAG signals completion of a CSI transaction. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4675
/*
 * Generic CSI write: load CSIDR with the value, kick off the write via
 * CSIAR, then poll until the flag drops (up to 100 * 10 us).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4686
/*
 * Generic CSI read: start the transaction via CSIAR, poll for completion,
 * then fetch CSIDR.  Returns all-ones on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4697
/*
 * 8402/8411 CSI write: like r8169_csi_write() but additionally selects
 * the NIC function via CSIAR_FUNC_NIC.
 */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4709
/*
 * 8402/8411 CSI read: like r8169_csi_read() but additionally selects
 * the NIC function via CSIAR_FUNC_NIC.  Returns all-ones on timeout.
 */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4720
/*
 * Select the CSI accessor pair for the detected chip: none for pre-PCIe
 * parts (VER_01..17), the FUNC_NIC variant for 8402/8411 (VER_37/38),
 * and the generic accessors for everything else.
 */
static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
	struct csi_ops *ops = &tp->csi_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		ops->write = NULL;
		ops->read = NULL;
		break;

	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
		ops->write = r8402_csi_write;
		ops->read = r8402_csi_read;
		break;

	default:
		ops->write = r8169_csi_write;
		ops->read = r8169_csi_read;
		break;
	}
}
4756
/* One PHY EEPROM (ephy) register patch: clear 'mask' bits, then set 'bits'
 * (as applied by rtl_ephy_init()).
 */
struct ephy_info {
	unsigned int offset;	/* ephy register address */
	u16 mask;		/* bits to clear before OR-ing in 'bits' */
	u16 bits;		/* bits to set */
};
4762
4763 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4764 int len)
4765 {
4766 u16 w;
4767
4768 while (len-- > 0) {
4769 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4770 rtl_ephy_write(tp, e->offset, w);
4771 e++;
4772 }
4773 }
4774
/* Clear the PCIe Link Control CLKREQ enable bit for this device. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}
4780
/* Set the PCIe Link Control CLKREQ enable bit for this device. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
4786
/* CPlusCmd bits that must be cleared on 8168-family chips during init. */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4797
/* Chip-specific init for the 8168B rev. B (RTL_GIGA_MAC_VER_11). */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4810
/* Chip-specific init for 8168B rev. E/F (VER_12/VER_17): 8168bb setup plus
 * Tx packet size and a Config4 bit clear.
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4821
/* Common tail of the 8168C/CP init paths (callers do CSI/ephy setup first). */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4837
/* Chip-specific init for the 8168CP vers. 1 (RTL_GIGA_MAC_VER_18). */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4854
/* Chip-specific init for the 8168CP vers. 2 (RTL_GIGA_MAC_VER_23). */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4868
/* Chip-specific init for the 8168CP vers. 3 (RTL_GIGA_MAC_VER_24). */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4887
/* Chip-specific init for the 8168C vers. 1 (RTL_GIGA_MAC_VER_19). */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
4905
/* Chip-specific init for the 8168C vers. 2 (RTL_GIGA_MAC_VER_20). */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
4919
/* 8168C vers. 3 (VER_21) uses the exact same sequence as vers. 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
4924
/* 8168C vers. 4 (VER_22): no ephy patches, just CSI enable + common tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4931
/* Chip-specific init for the 8168D (VER_25/26/27). */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4947
/* Chip-specific init for the 8168DP (RTL_GIGA_MAC_VER_31). */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4961
4962 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4963 {
4964 void __iomem *ioaddr = tp->mmio_addr;
4965 struct pci_dev *pdev = tp->pci_dev;
4966 static const struct ephy_info e_info_8168d_4[] = {
4967 { 0x0b, ~0, 0x48 },
4968 { 0x19, 0x20, 0x50 },
4969 { 0x0c, ~0, 0x20 }
4970 };
4971 int i;
4972
4973 rtl_csi_access_enable_1(tp);
4974
4975 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4976
4977 RTL_W8(MaxTxPacketSize, TxPacketMax);
4978
4979 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4980 const struct ephy_info *e = e_info_8168d_4 + i;
4981 u16 w;
4982
4983 w = rtl_ephy_read(tp, e->offset);
4984 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4985 }
4986
4987 rtl_enable_clock_request(pdev);
4988 }
4989
/* Chip-specific init for the 8168E vers. 1 (VER_32/VER_33). */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5026
/* Chip-specific init for the 8168E vers. 2 (RTL_GIGA_MAC_VER_34). */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Undocumented ERI (extended GMAC) register setup. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
}
5064
/* Common init for the 8168F family (shared by 8168f_1 and 8411 paths). */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Undocumented ERI (extended GMAC) register setup. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
}
5094
/* Chip-specific init for the 8168F vers. 1 (VER_35/VER_36). */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5114
5115 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5116 {
5117 static const struct ephy_info e_info_8168f_1[] = {
5118 { 0x06, 0x00c0, 0x0020 },
5119 { 0x0f, 0xffff, 0x5200 },
5120 { 0x1e, 0x0000, 0x4000 },
5121 { 0x19, 0x0000, 0x0224 }
5122 };
5123
5124 rtl_hw_start_8168f(tp);
5125
5126 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5127
5128 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5129 }
5130
/* Chip-specific init for the 8168G vers. 1 (VER_40/VER_41). */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Undocumented ERI (extended GMAC) register setup. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);
	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
5162
/*
 * Bring up an 8168-family chip: common register setup, then dispatch to
 * the per-version init routine, then enable Tx/Rx and re-lock config.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* Read flushes the preceding writes (PCI posting). */
	RTL_R8(IntrMask);

	/* Per-version hardware quirks and extra register setup. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5281
/* CPlusCmd bits that must be cleared on 810x-family chips during init. */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5292
/* Chip-specific init for the 8102E vers. 1 (RTL_GIGA_MAC_VER_07). */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck set, clear LEDS0. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5325
/* Chip-specific init for the 8102E vers. 2 (RTL_GIGA_MAC_VER_09). */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
5338
/* 8102E vers. 3 (VER_08): vers. 2 setup plus one extra ephy write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5345
/* Chip-specific init for the 8105E vers. 1 (RTL_GIGA_MAC_VER_29). */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5374
/* 8105E vers. 2 (VER_30): vers. 1 setup plus setting bit 15 of ephy 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5380
/* Chip-specific init for the 8402 (RTL_GIGA_MAC_VER_37). */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Undocumented ERI (extended GMAC) register setup. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5412
/* Chip-specific init for the 8106E (RTL_GIGA_MAC_VER_39). */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC,
		(RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5427
/*
 * Bring up an 8101/810x-family chip: dispatch to the per-version init
 * routine, then program rings, C+ command mode and the receive filter.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes the preceding writes (PCI posting). */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5495
5496 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5497 {
5498 struct rtl8169_private *tp = netdev_priv(dev);
5499
5500 if (new_mtu < ETH_ZLEN ||
5501 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5502 return -EINVAL;
5503
5504 if (new_mtu > ETH_DATA_LEN)
5505 rtl_hw_jumbo_enable(tp);
5506 else
5507 rtl_hw_jumbo_disable(tp);
5508
5509 dev->mtu = new_mtu;
5510 netdev_update_features(dev);
5511
5512 return 0;
5513 }
5514
/* Neutralize an Rx descriptor: poison the DMA address and clear DescOwn
 * so the NIC will not use it.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5520
/* Unmap, free and NULL one Rx data buffer, then retire its descriptor. */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5531
/* Hand an Rx descriptor back to the NIC: set DescOwn and the buffer size
 * while preserving the RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
5538
/* Publish a freshly mapped buffer to the NIC.  The wmb() ensures the DMA
 * address is visible before DescOwn is set in rtl8169_mark_to_asic().
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5546
/* Round a buffer pointer up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	return (void *)ALIGN((long)data, 16);
}
5551
/*
 * Allocate and DMA-map one Rx buffer on the device's NUMA node and attach
 * it to @desc.  If the first allocation is not 16-byte aligned, retry with
 * 15 bytes of slack so the mapped address can be aligned manually.
 * Returns the buffer (whose original pointer must be kfree'd later) or
 * NULL on allocation/mapping failure.
 *
 * Note: the declared return type is struct sk_buff * but the function
 * returns the raw kmalloc'ed buffer via a void pointer path.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5587
5588 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5589 {
5590 unsigned int i;
5591
5592 for (i = 0; i < NUM_RX_DESC; i++) {
5593 if (tp->Rx_databuff[i]) {
5594 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5595 tp->RxDescArray + i);
5596 }
5597 }
5598 }
5599
/* Set the RingEnd bit so the NIC wraps back to the first descriptor. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5604
5605 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5606 {
5607 unsigned int i;
5608
5609 for (i = 0; i < NUM_RX_DESC; i++) {
5610 void *data;
5611
5612 if (tp->Rx_databuff[i])
5613 continue;
5614
5615 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5616 if (!data) {
5617 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5618 goto err_out;
5619 }
5620 tp->Rx_databuff[i] = data;
5621 }
5622
5623 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5624 return 0;
5625
5626 err_out:
5627 rtl8169_rx_clear(tp);
5628 return -ENOMEM;
5629 }
5630
5631 static int rtl8169_init_ring(struct net_device *dev)
5632 {
5633 struct rtl8169_private *tp = netdev_priv(dev);
5634
5635 rtl8169_init_ring_indexes(tp);
5636
5637 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5638 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5639
5640 return rtl8169_rx_fill(tp);
5641 }
5642
/* Unmap one Tx buffer and scrub both the descriptor and the ring entry. */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5655
5656 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5657 unsigned int n)
5658 {
5659 unsigned int i;
5660
5661 for (i = 0; i < n; i++) {
5662 unsigned int entry = (start + i) % NUM_TX_DESC;
5663 struct ring_info *tx_skb = tp->tx_skb + entry;
5664 unsigned int len = tx_skb->len;
5665
5666 if (len) {
5667 struct sk_buff *skb = tx_skb->skb;
5668
5669 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5670 tp->TxDescArray + entry);
5671 if (skb) {
5672 tp->dev->stats.tx_dropped++;
5673 dev_kfree_skb(skb);
5674 tx_skb->skb = NULL;
5675 }
5676 }
5677 }
5678 }
5679
/* Drop every pending Tx entry and reset the Tx ring indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
5685
/*
 * Full device reset: quiesce NAPI and the Tx queue, reset the hardware,
 * give all Rx descriptors back to the NIC, drop pending Tx work, then
 * restart the hardware and the queue.  The teardown/restart ordering is
 * required; do not reorder.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for in-flight softirq/RCU-sched sections to finish. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5708
/* net_device watchdog hook: defer the heavy reset work to workqueue
 * context instead of doing it here.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5715
/* Map and describe every paged fragment of @skb, starting at the
 * descriptor after tp->cur_tx (the head descriptor is filled by the
 * caller).  @opts carries the opts1/opts2 template for each fragment
 * (opts[0] already contains DescOwn).
 *
 * Returns the number of fragments consumed, or -EIO when a DMA
 * mapping fails, in which case already-mapped fragments are unwound.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		/* RingEnd marks the physically last descriptor of the ring. */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The skb hangs off its last descriptor; rtl_tx() frees it
		 * when the LastFrag descriptor completes.
		 */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind only the fragment descriptors; the head descriptor at
	 * tp->cur_tx belongs to the caller.
	 */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5766
/* Fill the large-send / checksum-offload bits into the opts template.
 * The bit layout differs between descriptor generations; tx_desc_info
 * (indexed by tp->txd_version) provides the per-version word offset,
 * MSS shift and checksum flag values.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		/* Hardware segmentation, clamped to the chip's max MSS. */
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
5788
/* ndo_start_xmit: map the linear part of @skb, let rtl8169_xmit_frags()
 * map the paged fragments, then publish the head descriptor and kick
 * the chip.  DescOwn on the head descriptor is written last (after
 * wmb()) so the chip never sees a half-built chain.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	/* NOTE(review): opts[1] is byte-swapped here AND again when stored
	 * into txd->opts2 below - a no-op on little-endian hosts, but it
	 * reads as a double conversion on big-endian; verify with sparse.
	 */
	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		/* Single-descriptor packet: skb is freed from the head entry. */
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* Descriptor contents must be globally visible before DescOwn. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	/* Publish cur_tx before poking the chip's Tx poll register. */
	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5886
/* Invoked (via rtl_slow_event_work) on a SYSErr event: log the PCI
 * command/status registers, clear the latched error bits and schedule
 * a full chip reset.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI status error bits are write-one-to-clear: write back only
	 * the error bits currently latched.
	 */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5933
/* Reclaim completed Tx descriptors (NAPI context).  Walk from
 * dirty_tx towards cur_tx, unmap each finished entry, free the skb on
 * its LastFrag descriptor, then wake the queue if it was stopped for
 * lack of descriptors.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	/* Pair with the write barriers in rtl8169_start_xmit(). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		/* Read the descriptor only after the ownership check data
		 * from the previous iteration is settled.
		 */
		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5993
5994 static inline int rtl8169_fragmented_frame(u32 status)
5995 {
5996 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5997 }
5998
5999 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
6000 {
6001 u32 status = opts1 & RxProtoMask;
6002
6003 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
6004 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
6005 skb->ip_summed = CHECKSUM_UNNECESSARY;
6006 else
6007 skb_checksum_none_assert(skb);
6008 }
6009
/* Copy a received frame out of its permanently-mapped DMA buffer into
 * a freshly allocated skb (the Rx buffer itself is never handed up
 * the stack).  Returns NULL on allocation failure.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	/* Make the device's writes visible to the CPU before copying. */
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	/* Hand the buffer back to the device for the next packet. */
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
6028
/* Receive up to @budget frames (NAPI context).  Each good frame is
 * copied out of its DMA buffer into a fresh skb and fed to GRO; the
 * descriptor is then handed straight back to the chip.  Returns the
 * number of descriptors processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read opts1 only after seeing ownership released. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow requires a full chip reset. */
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* NETIF_F_RXALL: deliver runt/bad-CRC frames anyway. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user wants it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		desc->opts2 = 0;
		/* Clear opts2 before ownership is returned to the chip. */
		wmb();
		rtl8169_mark_to_asic(desc, rx_buf_sz);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
6116
6117 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6118 {
6119 struct net_device *dev = dev_instance;
6120 struct rtl8169_private *tp = netdev_priv(dev);
6121 int handled = 0;
6122 u16 status;
6123
6124 status = rtl_get_events(tp);
6125 if (status && status != 0xffff) {
6126 status &= RTL_EVENT_NAPI | tp->event_slow;
6127 if (status) {
6128 handled = 1;
6129
6130 rtl_irq_disable(tp);
6131 napi_schedule(&tp->napi);
6132 }
6133 }
6134 return IRQ_RETVAL(handled);
6135 }
6136
/*
 * Workqueue context.
 */
/* Service the "slow" interrupt events (Rx FIFO overflow, PCI system
 * error, link change) that rtl8169_poll() deferred, then re-enable
 * the full interrupt mask.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
6168
/* Central deferred-work dispatcher: run each pending sub-task in a
 * fixed order under the work lock, bailing out if the interface was
 * closed in the meantime.
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	/* RTL_FLAG_TASK_ENABLED is cleared on close/suspend; skip all
	 * pending work once it is gone.
	 */
	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
6202
/* NAPI poll: ack and service the fast (Rx/Tx) events inline, defer
 * slow events to the workqueue and keep their interrupt sources
 * masked until rtl_slow_event_work() re-enables them.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Slow events are acked by rtl_slow_event_work(), not here. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
6235
/* Fold the hardware RxMissed counter into netdev stats.  Only the old
 * 8169 chips (<= VER_06) implement this register; the 24-bit counter
 * is reset by writing 0.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
6246
/* Bring the interface fully down: stop the timer, NAPI and the Tx
 * queue, reset the chip and release both rings' contents.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6274
/* ndo_stop: counterpart of rtl_open().  Tears the device down under
 * the work lock, then releases the IRQ and the descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* rtl_task() checks this bit under the same lock, so no deferred
	 * work can run past this point.
	 */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	/* NULL rings also gate the runtime-PM callbacks (see below). */
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6304
6305 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole and friends: invoke the IRQ
 * handler directly.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
6312 #endif
6313
/* ndo_open: allocate the descriptor rings, populate the Rx ring, grab
 * the IRQ and run the hardware start sequence.  Error paths unwind in
 * reverse order via the goto chain at the bottom.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6393
/* ndo_get_stats64: fill @stats from the seqcount-protected 64-bit
 * Rx/Tx counters plus the plain netdev error counters.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	/* Refresh rx_missed_errors from the chip while it is reachable. */
	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes = tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes = tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_crc_errors = dev->stats.rx_crc_errors;
	stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
6427
/* Common suspend path shared by system sleep, runtime PM and
 * shutdown: detach the interface, stop NAPI/deferred work and power
 * the PLL down.  No-op when the interface is not running.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6445
6446 #ifdef CONFIG_PM
6447
/* dev_pm_ops .suspend/.freeze/.poweroff entry point. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6457
/* Common tail of system and runtime resume: re-attach the interface,
 * power the PLL up, re-enable NAPI/deferred work and schedule a chip
 * reset to restart the hardware.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6473
/* dev_pm_ops .resume/.thaw/.restore: re-init the PHY and restart the
 * interface if it was running at suspend time.
 */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6487
/* Runtime-PM suspend: only meaningful while the interface is open
 * (TxDescArray != NULL).  Saves the user's WoL setting, arms
 * wake-on-anything and suspends the interface.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
6506
/* Runtime-PM resume: restore the WoL configuration saved by
 * rtl8169_runtime_suspend(), re-init the PHY and restart the
 * interface.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6527
6528 static int rtl8169_runtime_idle(struct device *device)
6529 {
6530 struct pci_dev *pdev = to_pci_dev(device);
6531 struct net_device *dev = pci_get_drvdata(pdev);
6532 struct rtl8169_private *tp = netdev_priv(dev);
6533
6534 return tp->TxDescArray ? -EBUSY : 0;
6535 }
6536
/* PM callback table: every system-sleep phase maps onto the common
 * suspend/resume pair; the runtime_* hooks implement opportunistic
 * power-down of an idle device.
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend = rtl8169_suspend,
	.resume = rtl8169_resume,
	.freeze = rtl8169_suspend,
	.thaw = rtl8169_resume,
	.poweroff = rtl8169_suspend,
	.restore = rtl8169_resume,
	.runtime_suspend = rtl8169_runtime_suspend,
	.runtime_resume = rtl8169_runtime_resume,
	.runtime_idle = rtl8169_runtime_idle,
};

#define RTL8169_PM_OPS (&rtl8169_pm_ops)
6550
6551 #else /* !CONFIG_PM */
6552
6553 #define RTL8169_PM_OPS NULL
6554
6555 #endif /* !CONFIG_PM */
6556
/* 8168b-era chips (VER_11/12/17): keep the receiver enabled - with
 * bus mastering turned off - at shutdown, otherwise Wake-on-LAN does
 * not work.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6576
/* PCI shutdown/reboot hook: quiesce the NIC, restore the factory MAC
 * address and, on power-off with WoL configured, arm the wake logic
 * before dropping to D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6604
/* PCI remove: stop deferred work, unregister the netdev and release
 * every probe-time resource in reverse order.
 */
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Chips 27/28/31 need the firmware driver interface stopped
	 * before teardown.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	cancel_work_sync(&tp->wk.work);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Re-take the reference dropped when runtime wake was enabled. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
6634
/* net_device method table shared by all supported chips. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open = rtl_open,
	.ndo_stop = rtl8169_close,
	.ndo_get_stats64 = rtl8169_get_stats64,
	.ndo_start_xmit = rtl8169_start_xmit,
	.ndo_tx_timeout = rtl8169_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = rtl8169_change_mtu,
	.ndo_fix_features = rtl8169_fix_features,
	.ndo_set_features = rtl8169_set_features,
	.ndo_set_mac_address = rtl_set_mac_address,
	.ndo_do_ioctl = rtl8169_ioctl,
	.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = rtl8169_netpoll,
#endif

};
6653
/* Per-family configuration, selected by the PCI device table's
 * driver_data: hardware start handler, BAR to map, Rx buffer
 * alignment, the set of "slow" interrupt events and the MAC version
 * used as a fallback when chip identification fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start = rtl_hw_start_8169,
		.region = 1,
		.align = 0,
		.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features = RTL_FEATURE_GMII,
		.default_ver = RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start = rtl_hw_start_8168,
		.region = 2,
		.align = 8,
		.event_slow = SYSErr | LinkChg | RxOverflow,
		.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver = RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start = rtl_hw_start_8101,
		.region = 2,
		.align = 8,
		.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
			      PCSTimeout,
		.features = RTL_FEATURE_MSI,
		.default_ver = RTL_GIGA_MAC_VER_13,
	}
};
6688
/* Cfg9346_Unlock assumed. */
/* Try to enable MSI, falling back to legacy INTx when
 * pci_enable_msi() fails.  The MSIEnable bit of Config2 is only
 * written on the old 8169 chips (<= VER_06).  Returns
 * RTL_FEATURE_MSI on success, 0 otherwise.
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6710
/* Poll condition: MCU reports its link list as ready. */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6717
/* Poll condition: both Rx and Tx paths drained (all RXTX_EMPTY bits set). */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6724
/* One-time init for the 8168G family: take the chip out of OOB
 * (firmware-managed) mode so the driver can own it.  Every wait is
 * best-effort - on timeout the function silently gives up.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	/* RXDV_GATED_EN - presumably holds off Rx while reconfiguring
	 * (per the constant's name; no datasheet reference here).
	 */
	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* NOTE(review): the bit 14 clear / bit 15 set dance on MAC OCP
	 * register 0xe8de is undocumented chip magic - do not change
	 * without a vendor reference.
	 */
	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
6758
6759 static void rtl_hw_initialize(struct rtl8169_private *tp)
6760 {
6761 switch (tp->mac_version) {
6762 case RTL_GIGA_MAC_VER_40:
6763 case RTL_GIGA_MAC_VER_41:
6764 rtl_hw_init_8168g(tp);
6765 break;
6766
6767 default:
6768 break;
6769 }
6770 }
6771
6772 static int
6773 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6774 {
6775 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6776 const unsigned int region = cfg->region;
6777 struct rtl8169_private *tp;
6778 struct mii_if_info *mii;
6779 struct net_device *dev;
6780 void __iomem *ioaddr;
6781 int chipset, i;
6782 int rc;
6783
6784 if (netif_msg_drv(&debug)) {
6785 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6786 MODULENAME, RTL8169_VERSION);
6787 }
6788
6789 dev = alloc_etherdev(sizeof (*tp));
6790 if (!dev) {
6791 rc = -ENOMEM;
6792 goto out;
6793 }
6794
/*
 * NOTE(review): tail of the PCI probe routine (rtl_init_one). The
 * allocation of @dev, the @cfg lookup and the @region selection happen
 * above this excerpt — confirm against the full file.
 */
6795 SET_NETDEV_DEV(dev, &pdev->dev);
6796 dev->netdev_ops = &rtl_netdev_ops;
6797 tp = netdev_priv(dev);
6798 tp->dev = dev;
6799 tp->pci_dev = pdev;
6800 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6801
/* Populate the generic mii_if_info used by the MII helper routines. */
6802 mii = &tp->mii;
6803 mii->dev = dev;
6804 mii->mdio_read = rtl_mdio_read;
6805 mii->mdio_write = rtl_mdio_write;
6806 mii->phy_id_mask = 0x1f;
6807 mii->reg_num_mask = 0x1f;
6808 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6809
6810 /* disable ASPM completely as that cause random device stop working
6811 * problems as well as full system hangs for some PCIe devices users */
6812 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6813 PCIE_LINK_STATE_CLKPM);
6814
6815 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6816 rc = pci_enable_device(pdev);
6817 if (rc < 0) {
6818 netif_err(tp, probe, dev, "enable failure\n");
6819 goto err_out_free_dev_1;
6820 }
6821
/* Memory-Write-Invalidate is a performance hint only; failure is not fatal. */
6822 if (pci_set_mwi(pdev) < 0)
6823 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6824
6825 /* make sure PCI base addr 1 is MMIO */
6826 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6827 netif_err(tp, probe, dev,
6828 "region #%d not an MMIO resource, aborting\n",
6829 region);
6830 rc = -ENODEV;
6831 goto err_out_mwi_2;
6832 }
6833
6834 /* check for weird/broken PCI region reporting */
6835 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6836 netif_err(tp, probe, dev,
6837 "Invalid PCI region size(s), aborting\n");
6838 rc = -ENODEV;
6839 goto err_out_mwi_2;
6840 }
6841
6842 rc = pci_request_regions(pdev, MODULENAME);
6843 if (rc < 0) {
6844 netif_err(tp, probe, dev, "could not request regions\n");
6845 goto err_out_mwi_2;
6846 }
6847
6848 tp->cp_cmd = RxChkSum;
6849
/*
 * Prefer a 64-bit DMA mask (Dual Address Cycle) when dma_addr_t is wide
 * enough and the user requested it via the use_dac module parameter;
 * otherwise fall back to a 32-bit mask, which is required to succeed.
 */
6850 if ((sizeof(dma_addr_t) > 4) &&
6851 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6852 tp->cp_cmd |= PCIDAC;
6853 dev->features |= NETIF_F_HIGHDMA;
6854 } else {
6855 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6856 if (rc < 0) {
6857 netif_err(tp, probe, dev, "DMA configuration failed\n");
6858 goto err_out_free_res_3;
6859 }
6860 }
6861
6862 /* ioremap MMIO region */
6863 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6864 if (!ioaddr) {
6865 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6866 rc = -EIO;
6867 goto err_out_free_res_3;
6868 }
6869 tp->mmio_addr = ioaddr;
6870
6871 if (!pci_is_pcie(pdev))
6872 netif_info(tp, probe, dev, "not PCI Express\n");
6873
6874 /* Identify chip attached to board */
6875 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6876
6877 rtl_init_rxcfg(tp);
6878
/*
 * Quiesce the chip (interrupts masked, clean reset, stale events acked)
 * before enabling bus mastering.
 */
6879 rtl_irq_disable(tp);
6880
6881 rtl_hw_initialize(tp);
6882
6883 rtl_hw_reset(tp);
6884
6885 rtl_ack_events(tp, 0xffff);
6886
6887 pci_set_master(pdev);
6888
6889 /*
6890 * Pretend we are using VLANs; This bypasses a nasty bug where
6891 * Interrupts stop flowing on high load on 8110SCd controllers.
6892 */
6893 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6894 tp->cp_cmd |= RxVlan;
6895
/* Install the per-chip-generation operation tables. */
6896 rtl_init_mdio_ops(tp);
6897 rtl_init_pll_power_ops(tp);
6898 rtl_init_jumbo_ops(tp);
6899 rtl_init_csi_ops(tp);
6900
6901 rtl8169_print_mac_version(tp);
6902
6903 chipset = tp->mac_version;
6904 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6905
/*
 * Unlock the config registers to latch PM/WOL capability bits and to
 * probe MSI support, then re-lock.
 */
6906 RTL_W8(Cfg9346, Cfg9346_Unlock);
6907 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6908 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6909 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6910 tp->features |= RTL_FEATURE_WOL;
6911 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6912 tp->features |= RTL_FEATURE_WOL;
6913 tp->features |= rtl_try_msi(tp, cfg);
6914 RTL_W8(Cfg9346, Cfg9346_Lock);
6915
/* Select TBI (fiber) or xMII (copper) link-management callbacks. */
6916 if (rtl_tbi_enabled(tp)) {
6917 tp->set_speed = rtl8169_set_speed_tbi;
6918 tp->get_settings = rtl8169_gset_tbi;
6919 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6920 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6921 tp->link_ok = rtl8169_tbi_link_ok;
6922 tp->do_ioctl = rtl_tbi_ioctl;
6923 } else {
6924 tp->set_speed = rtl8169_set_speed_xmii;
6925 tp->get_settings = rtl8169_gset_xmii;
6926 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6927 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6928 tp->link_ok = rtl8169_xmii_link_ok;
6929 tp->do_ioctl = rtl_xmii_ioctl;
6930 }
6931
6932 mutex_init(&tp->wk.mutex);
6933
6934 /* Get MAC address */
6935 for (i = 0; i < ETH_ALEN; i++)
6936 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6937
6938 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6939 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6940
6941 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6942
6943 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6944 * properly for all devices */
6945 dev->features |= NETIF_F_RXCSUM |
6946 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6947
6948 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6949 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6950 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6951 NETIF_F_HIGHDMA;
6952
6953 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6954 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6955 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6956
6957 dev->hw_features |= NETIF_F_RXALL;
6958 dev->hw_features |= NETIF_F_RXFCS;
6959
6960 tp->hw_start = cfg->hw_start;
6961 tp->event_slow = cfg->event_slow;
6962
/*
 * Keep the RxBOVF/RxFOVF descriptor bits only on the original 8169
 * (VER_01); mask them out of opts1 on every later chip.
 */
6963 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6964 ~(RxBOVF | RxFOVF) : ~0;
6965
/* PHY poll timer; callback is rtl8169_phy_timer(), armed elsewhere. */
6966 init_timer(&tp->timer);
6967 tp->timer.data = (unsigned long) dev;
6968 tp->timer.function = rtl8169_phy_timer;
6969
6970 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6971
6972 rc = register_netdev(dev);
6973 if (rc < 0)
6974 goto err_out_msi_4;
6975
6976 pci_set_drvdata(pdev, dev);
6977
6978 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6979 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6980 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6981 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6982 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6983 "tx checksumming: %s]\n",
6984 rtl_chip_infos[chipset].jumbo_max,
6985 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6986 }
6987
/* These 8168 variants get an extra driver-start handshake. */
6988 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6989 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6990 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6991 rtl8168_driver_start(tp);
6992 }
6993
6994 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6995
/* Let runtime PM idle the device when the PCI core can wake it up. */
6996 if (pci_dev_run_wake(pdev))
6997 pm_runtime_put_noidle(&pdev->dev);
6998
6999 netif_carrier_off(dev);
7000
7001 out:
7002 return rc;
7003
/* Error unwind: labels ordered inverse to resource acquisition above. */
7004 err_out_msi_4:
7005 netif_napi_del(&tp->napi);
7006 rtl_disable_msi(pdev, tp);
7007 iounmap(ioaddr);
7008 err_out_free_res_3:
7009 pci_release_regions(pdev);
7010 err_out_mwi_2:
7011 pci_clear_mwi(pdev);
7012 pci_disable_device(pdev);
7013 err_out_free_dev_1:
7014 free_netdev(dev);
7015 goto out;
7016 }
7017
/*
 * PCI driver glue: matches the device IDs listed in rtl8169_pci_tbl and
 * routes probe/remove/shutdown and power-management events to the rtl_*
 * handlers defined in this file.
 */
7018 static struct pci_driver rtl8169_pci_driver = {
7019 .name = MODULENAME,
7020 .id_table = rtl8169_pci_tbl,
7021 .probe = rtl_init_one,
7022 .remove = rtl_remove_one,
7023 .shutdown = rtl_shutdown,
7024 .driver.pm = RTL8169_PM_OPS,
7025 };
7026
/* Registers rtl8169_pci_driver on module load, unregisters it on unload. */
7027 module_pci_driver(rtl8169_pci_driver);
This page took 0.359106 seconds and 5 git commands to generate.