Merge branch 'acpi-lpss'
[deliverable/linux.git] / drivers / net / ethernet / realtek / r8169.c
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
51
#ifdef RTL8169_DEBUG
/*
 * Debug-only assertion: log at KERN_ERR when 'expr' is false.
 * Wrapped in do { } while (0) so it expands safely inside unbraced
 * if/else bodies (the original bare 'if' had a dangling-else hazard),
 * and given an explicit log level (plain printk falls back to the
 * default level).
 */
#define assert(expr) \
do { \
	if (!(expr)) { \
		printk(KERN_ERR PFX "Assertion failed! %s,%s,%s,line=%d\n", \
		       #expr, __FILE__, __func__, __LINE__); \
	} \
} while (0)
/* Debug printf; compiled out entirely when RTL8169_DEBUG is unset. */
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */
64
#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/* Number of free Tx descriptor slots. The macro argument is now
 * parenthesized so expansion is safe for any pointer expression. */
#define TX_SLOTS_AVAIL(tp) \
	((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx)

/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
#define TX_FRAGS_READY_FOR(tp,nr_frags) \
	(TX_SLOTS_AVAIL(tp) >= ((nr_frags) + 1))
74
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

#define MAX_READ_REQUEST_SHIFT	12
#define TX_DMA_BURST		7	/* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap		0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

/* write/read MMIO register.
 * NOTE: these rely on a local 'void __iomem *ioaddr' being in scope at
 * every use site (the per-function 'ioaddr = tp->mmio_addr' idiom). */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		readl (ioaddr + (reg))
100
/*
 * Chip generation identifiers, detected at probe time. The values are
 * used as indices into rtl_chip_infos[]; RTL_GIGA_MAC_NONE means
 * "no supported chip recognized".
 */
enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0,
	RTL_GIGA_MAC_VER_02,
	RTL_GIGA_MAC_VER_03,
	RTL_GIGA_MAC_VER_04,
	RTL_GIGA_MAC_VER_05,
	RTL_GIGA_MAC_VER_06,
	RTL_GIGA_MAC_VER_07,
	RTL_GIGA_MAC_VER_08,
	RTL_GIGA_MAC_VER_09,
	RTL_GIGA_MAC_VER_10,
	RTL_GIGA_MAC_VER_11,
	RTL_GIGA_MAC_VER_12,
	RTL_GIGA_MAC_VER_13,
	RTL_GIGA_MAC_VER_14,
	RTL_GIGA_MAC_VER_15,
	RTL_GIGA_MAC_VER_16,
	RTL_GIGA_MAC_VER_17,
	RTL_GIGA_MAC_VER_18,
	RTL_GIGA_MAC_VER_19,
	RTL_GIGA_MAC_VER_20,
	RTL_GIGA_MAC_VER_21,
	RTL_GIGA_MAC_VER_22,
	RTL_GIGA_MAC_VER_23,
	RTL_GIGA_MAC_VER_24,
	RTL_GIGA_MAC_VER_25,
	RTL_GIGA_MAC_VER_26,
	RTL_GIGA_MAC_VER_27,
	RTL_GIGA_MAC_VER_28,
	RTL_GIGA_MAC_VER_29,
	RTL_GIGA_MAC_VER_30,
	RTL_GIGA_MAC_VER_31,
	RTL_GIGA_MAC_VER_32,
	RTL_GIGA_MAC_VER_33,
	RTL_GIGA_MAC_VER_34,
	RTL_GIGA_MAC_VER_35,
	RTL_GIGA_MAC_VER_36,
	RTL_GIGA_MAC_VER_37,
	RTL_GIGA_MAC_VER_38,
	RTL_GIGA_MAC_VER_39,
	RTL_GIGA_MAC_VER_40,
	RTL_GIGA_MAC_VER_41,
	RTL_GIGA_MAC_NONE   = 0xff,
};
145
/* Tx descriptor layout generation; selects the tx_desc_info[] entry
 * (checksum bit positions, MSS shift, opts word) used per chip. */
enum rtl_tx_desc_version {
	RTL_TD_0	= 0,
	RTL_TD_1	= 1,
};
150
/* Usable jumbo payload per frame size class (frame minus header/overhead). */
#define JUMBO_1K	ETH_DATA_LEN
#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)

/* Shorthand initializer for one rtl_chip_infos[] entry. */
#define _R(NAME,TD,FW,SZ,B) {	\
	.name = NAME,		\
	.txd_version = TD,	\
	.fw_name = FW,		\
	.jumbo_max = SZ,	\
	.jumbo_tx_csum = B	\
}

/*
 * Per-chip capability table, indexed by enum mac_version: marketing
 * name, Tx descriptor layout, optional firmware blob, maximum jumbo
 * payload and whether Tx checksumming works with jumbo frames.
 */
static const struct {
	const char *name;
	enum rtl_tx_desc_version txd_version;
	const char *fw_name;
	u16 jumbo_max;
	bool jumbo_tx_csum;
} rtl_chip_infos[] = {
	/* PCI devices. */
	[RTL_GIGA_MAC_VER_01] =
		_R("RTL8169",		RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_02] =
		_R("RTL8169s",		RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_03] =
		_R("RTL8110s",		RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_04] =
		_R("RTL8169sb/8110sb",	RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_05] =
		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_06] =
		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
	/* PCI-E devices. */
	[RTL_GIGA_MAC_VER_07] =
		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_08] =
		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_09] =
		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_10] =
		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_11] =
		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_12] =
		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_13] =
		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_14] =
		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_15] =
		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_16] =
		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_17] =
		_R("RTL8168b/8111b",	RTL_TD_1, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_18] =
		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_19] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_20] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_21] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_22] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_23] =
		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_24] =
		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_25] =
		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_26] =
		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_27] =
		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_28] =
		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_29] =
		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_30] =
		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_31] =
		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_32] =
		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_33] =
		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_34] =
		_R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_35] =
		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_36] =
		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_37] =
		_R("RTL8402",		RTL_TD_1, FIRMWARE_8402_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_38] =
		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_39] =
		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_40] =
		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_41] =
		_R("RTL8168g/8111g",	RTL_TD_1, NULL, JUMBO_9K, false),
};
#undef _R
271
/* Board configuration class, carried in the PCI ID table's driver_data. */
enum cfg_version {
	RTL_CFG_0 = 0x00,
	RTL_CFG_1,
	RTL_CFG_2
};
277
/*
 * PCI IDs handled by this driver. Entries with explicit subvendor /
 * subdevice IDs (D-Link 0x4b10, Linksys 0x0024 and the 0x0001:0x8168
 * board) must precede the wildcard entries so they match first.
 */
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_DLINK,			0x4300,
		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{0,},
};

MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
298
/* Rx buffer size in bytes (16K - 1, the chip's maximum). */
static int rx_buf_sz = 16383;
/* Module parameter: enable 64-bit PCI DAC addressing (see use_dac below). */
static int use_dac;
/* Module parameter container: netif msg_enable bitmask, default "all". */
static struct {
	u32 msg_enable;
} debug = { -1 };
304
/*
 * Register offsets into the chip's MMIO window, common to all
 * generations; accessed through the RTL_R*/RTL_W* macros. Bit
 * definitions for individual registers follow as #defines next to
 * the register they belong to.
 */
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,

	TxConfig	= 0x40,
#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

	RxConfig	= 0x44,
#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define	RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define	RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

	RxMissed	= 0x4c,
	Cfg9346		= 0x50,
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
#define PME_SIGNAL			(1 << 5)	/* 8168c and later */

	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,
	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27

	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	FuncForceEvent	= 0xfc,
};
368
/* 8110-only TBI registers (see the TBICSR bit definitions below). */
enum rtl8110_registers {
	TBICSR			= 0x64,
	TBI_ANAR		= 0x68,
	TBI_LPAR		= 0x6a,
};
374
/* Registers shared by the PCI-E 8168 and 8101 families (CSI, EPHY,
 * e-fuse and assorted MCU/debug registers). */
enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0f
#define	CSIAR_BYTE_ENABLE_SHIFT		12
#define	CSIAR_ADDR_MASK			0x0fff
#define CSIAR_FUNC_CARD			0x00000000
#define CSIAR_FUNC_SDIO			0x00010000
#define CSIAR_FUNC_NIC			0x00020000
	PMCH			= 0x6f,
	EPHYAR			= 0x80,
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DLLPR			= 0xd0,
#define	PFM_EN				(1 << 6)
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	TWSI			= 0xd2,
	MCU			= 0xd3,
#define	NOW_IS_OOB			(1 << 7)
#define	TX_EMPTY			(1 << 5)
#define	RX_EMPTY			(1 << 4)
#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
#define	EN_NDP				(1 << 3)
#define	EN_OOB_RESET			(1 << 2)
#define	LINK_LIST_RDY			(1 << 1)
	EFUSEAR			= 0xdc,
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
};
415
/* 8168-family-only registers: ERI, OCP (firmware/PHY side-channel)
 * and misc control registers. */
enum rtl8168_registers {
	LED_FREQ		= 0x1a,
	EEE_LED			= 0x1b,
	ERIDR			= 0x70,
	ERIAR			= 0x74,
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
	EPHY_RXER_NUM		= 0x7c,
	OCPDR			= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
	OCPAR			= 0xb4,
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	GPHY_OCP		= 0xb8,
	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC			= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
#define PWM_EN				(1 << 22)
#define RXDV_GATED_EN			(1 << 19)
#define EARLY_TALLY_EN			(1 << 16)
};
454
/* Bit-level contents of the registers declared above, grouped per
 * register (interrupt status, Rx status, chip command, config bytes,
 * CPlusCmd, PHY status, TBI and counter-dump bits). */
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxBOVF	= (1 << 24),
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	StopReq		= 0x80,
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	Spi_en		= (1 << 3),
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
	INTT_0		= 0x0000,	// 8168
	INTT_1		= 0x0001,	// 8168
	INTT_2		= 0x0002,	// 8168
	INTT_3		= 0x0003,	// 8168

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* DumpCounterCommand */
	CounterDump	= 0x8,
};
581
/* Descriptor opts1 bits common to both Tx and Rx rings. */
enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
};
589
590 /* Generic case. */
/* Tx descriptor bits valid for every txd_version (generic case). */
enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO		= (1 << 27),		/* Large Send Offload */
#define TD_MSS_MAX			0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
};
599
600 /* 8169, 8168b and 810x except 8102e. */
/* RTL_TD_0 layout: 8169, 8168b and 810x except 8102e (checksum bits
 * and MSS live in opts1). */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
};
608
609 /* 8102e, 8168c and beyond. */
/* RTL_TD_1 layout: 8102e, 8168c and beyond (checksum bits and MSS
 * moved to opts2). */
enum rtl_tx_desc_bit_1 {
	/* Second doubleword. */
#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
	TD1_IP_CS	= (1 << 29),		/* Calculate IP checksum */
	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
};
617
/* Per-txd_version dispatch data: which checksum bits to set, where the
 * MSS field sits, and which opts word (0 = opts1, 1 = opts2) carries
 * the offload flags. Indexed by enum rtl_tx_desc_version. */
static const struct rtl_tx_desc_info {
	struct {
		u32 udp;
		u32 tcp;
	} checksum;
	u16 mss_shift;
	u16 opts_offset;
} tx_desc_info [] = {
	[RTL_TD_0] = {
		.checksum = {
			.udp	= TD0_IP_CS | TD0_UDP_CS,
			.tcp	= TD0_IP_CS | TD0_TCP_CS
		},
		.mss_shift	= TD0_MSS_SHIFT,
		.opts_offset	= 0
	},
	[RTL_TD_1] = {
		.checksum = {
			.udp	= TD1_IP_CS | TD1_UDP_CS,
			.tcp	= TD1_IP_CS | TD1_TCP_CS
		},
		.mss_shift	= TD1_MSS_SHIFT,
		.opts_offset	= 1
	}
};
643
/* Rx descriptor status bits (protocol ID, checksum results, VLAN).
 * NOTE(review): RxVlanTag shares bit 16 with IPFail — they appear to
 * apply to different descriptor words (opts2 vs opts1); confirm at
 * the Rx-path use sites. */
enum rtl_rx_desc_bit {
	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};
659
660 #define RsvdMask 0x3fffc000
661
/* Hardware Tx descriptor (little-endian, DMA'd by the NIC); layout is
 * fixed by the chip — do not reorder fields. */
struct TxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;	/* DMA address of the packet buffer */
};
667
/* Hardware Rx descriptor (little-endian, DMA'd by the NIC); layout is
 * fixed by the chip — do not reorder fields. */
struct RxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;	/* DMA address of the receive buffer */
};
673
/* Driver-side bookkeeping for one Tx slot. */
struct ring_info {
	struct sk_buff	*skb;	/* skb mapped into this slot (NULL if free) */
	u32		len;	/* mapped DMA length */
	u8		__pad[sizeof(void *) - sizeof(u32)]; /* pad to pointer size */
};
679
/* Capability flags collected at probe time (tp->features bitmask). */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),
	RTL_FEATURE_MSI		= (1 << 1),
	RTL_FEATURE_GMII	= (1 << 2),
};
685
/* Hardware statistics block.
 * NOTE(review): layout appears to mirror the chip's counter dump
 * (CounterAddrLow/High) — do not reorder or resize fields; the
 * 'tx_underun' spelling is kept as-is since it names an existing
 * field. Confirm against the ethtool stats code. */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;
};
701
/* Bit numbers for the tp->wk.flags bitmap driving the deferred-work
 * machinery (RTL_FLAG_MAX sizes the DECLARE_BITMAP). */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,
	RTL_FLAG_TASK_SLOW_PENDING,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_TASK_PHY_PENDING,
	RTL_FLAG_MAX
};
709
/* Per-direction packet/byte counters, protected by u64_stats_sync for
 * tear-free reads on 32-bit hosts. */
struct rtl8169_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
715
/* Per-adapter driver state. */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* ioremapped MMIO base (virtual, not physical) */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct napi_struct napi;
	u32 msg_enable;
	u16 txd_version;		/* index into tx_desc_info[] */
	u16 mac_version;		/* enum mac_version */
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_tx;
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;		/* DMA address of TxDescArray */
	dma_addr_t RxPhyAddr;		/* DMA address of RxDescArray */
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	struct timer_list timer;
	u16 cp_cmd;			/* cached CPlusCmd value */

	u16 event_slow;

	/* Per-generation MDIO (PHY register) accessors. */
	struct mdio_ops {
		void (*write)(struct rtl8169_private *, int, int);
		int (*read)(struct rtl8169_private *, int);
	} mdio_ops;

	/* Per-generation PHY/PLL power handling. */
	struct pll_power_ops {
		void (*down)(struct rtl8169_private *);
		void (*up)(struct rtl8169_private *);
	} pll_power_ops;

	/* Per-generation jumbo-frame enable/disable. */
	struct jumbo_ops {
		void (*enable)(struct rtl8169_private *);
		void (*disable)(struct rtl8169_private *);
	} jumbo_ops;

	/* Per-generation CSI (config-space indirect) accessors. */
	struct csi_ops {
		void (*write)(struct rtl8169_private *, int, int);
		u32 (*read)(struct rtl8169_private *, int);
	} csi_ops;

	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(struct rtl8169_private *tp);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);

	/* Deferred work state; flags bits are enum rtl_flag, guarded by
	 * mutex (see rtl_lock_work/rtl_unlock_work). */
	struct {
		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
		struct mutex mutex;
		struct work_struct work;
	} wk;

	unsigned features;		/* enum features bitmask */

	struct mii_if_info mii;
	struct rtl8169_counters counters;
	u32 saved_wolopts;
	u32 opts1_mask;

	/* Loaded PHY firmware blob and its parsed action list. */
	struct rtl_fw {
		const struct firmware *fw;

#define RTL_VER_SIZE		32

		char version[RTL_VER_SIZE];

		struct rtl_fw_phy_action {
			__le32 *code;
			size_t size;
		} phy_action;
	} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)

	u32 ocp_base;			/* current OCP page base, see r8168g_mdio_write() */
};
797
798 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
799 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
800 module_param(use_dac, int, 0);
801 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
802 module_param_named(debug, debug.msg_enable, int, 0);
803 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
804 MODULE_LICENSE("GPL");
805 MODULE_VERSION(RTL8169_VERSION);
806 MODULE_FIRMWARE(FIRMWARE_8168D_1);
807 MODULE_FIRMWARE(FIRMWARE_8168D_2);
808 MODULE_FIRMWARE(FIRMWARE_8168E_1);
809 MODULE_FIRMWARE(FIRMWARE_8168E_2);
810 MODULE_FIRMWARE(FIRMWARE_8168E_3);
811 MODULE_FIRMWARE(FIRMWARE_8105E_1);
812 MODULE_FIRMWARE(FIRMWARE_8168F_1);
813 MODULE_FIRMWARE(FIRMWARE_8168F_2);
814 MODULE_FIRMWARE(FIRMWARE_8402_1);
815 MODULE_FIRMWARE(FIRMWARE_8411_1);
816 MODULE_FIRMWARE(FIRMWARE_8106E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168G_1);
818
/* Serialize access to the deferred-work state (tp->wk). */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
823
/* Release the deferred-work mutex taken by rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
828
/* Replace the PCIe Max_Read_Request_Size field of the Device Control
 * register with 'force' (READRQ bits are cleared, then 'force' set). */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
834
/* A pollable hardware condition: check() samples it, msg names it in
 * the timeout log message (see rtl_loop_wait()). */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
839
/* udelay() wrapper matching rtl_loop_wait()'s delay-callback signature. */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
844
845 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
846 void (*delay)(unsigned int), unsigned int d, int n,
847 bool high)
848 {
849 int i;
850
851 for (i = 0; i < n; i++) {
852 delay(d);
853 if (c->check(tp) == high)
854 return true;
855 }
856 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
857 c->msg, !high, n, d);
858 return false;
859 }
860
/* Busy-wait (udelay granularity) until the condition reads true. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
867
/* Busy-wait (udelay granularity) until the condition reads false. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
874
/* Sleeping wait (msleep granularity) until the condition reads true. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
881
/* Sleeping wait (msleep granularity) until the condition reads false. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
888
/*
 * Declare a named rtl_cond and its check function in one go: emits a
 * forward declaration, the const descriptor (msg is the stringized
 * condition name), then opens the check-function definition — the
 * function body follows the macro invocation.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
898
/* Sample the OCPAR completion flag (readers wait for it to rise,
 * writers for it to fall — see ocp_read()/ocp_write()). */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
905
906 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
907 {
908 void __iomem *ioaddr = tp->mmio_addr;
909
910 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
911
912 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
913 RTL_R32(OCPDR) : ~0;
914 }
915
916 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
917 {
918 void __iomem *ioaddr = tp->mmio_addr;
919
920 RTL_W32(OCPDR, data);
921 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
922
923 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
924 }
925
/* ERIAR command-in-flight flag (cleared when an ERI write completes). */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
932
/*
 * Hand a command byte to the OOB (DASH) firmware: write it through
 * ERI (address word 0x800010e8 = write to 0xe8), wait for the ERIAR
 * flag to drop, then write OCP reg 0x30 — presumably a firmware
 * doorbell; confirm against Realtek DASH documentation.
 * The statement order and the 2ms settle delay are intentional.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
946
947 #define OOB_CMD_RESET 0x00
948 #define OOB_CMD_DRIVER_START 0x05
949 #define OOB_CMD_DRIVER_STOP 0x06
950
951 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
952 {
953 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
954 }
955
956 DECLARE_RTL_COND(rtl_ocp_read_cond)
957 {
958 u16 reg;
959
960 reg = rtl8168_get_ocp_reg(tp);
961
962 return ocp_read(tp, 0x0f, reg) & 0x00000800;
963 }
964
/* Tell the OOB firmware the driver is starting, then wait up to
 * ~100ms (10 x 10ms) for the OCP status bit to come up. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
971
/* Tell the OOB firmware the driver is stopping, then wait up to
 * ~100ms (10 x 10ms) for the OCP status bit to drop. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
978
979 static int r8168dp_check_dash(struct rtl8169_private *tp)
980 {
981 u16 reg = rtl8168_get_ocp_reg(tp);
982
983 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
984 }
985
986 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
987 {
988 if (reg & 0xffff0001) {
989 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
990 return true;
991 }
992 return false;
993 }
994
/* GPHY_OCP busy/completion flag (same bit position as OCPAR_FLAG). */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1001
/*
 * Write a PHY register through the GPHY_OCP window: the register
 * address occupies the upper bits (reg << 15) alongside the write
 * flag and 16-bit data, then the flag is polled low (25us x 10).
 * Silently returns on an invalid register address.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1013
/*
 * Read a PHY register through the GPHY_OCP window: post the address
 * (reg << 15, read command = flag clear), poll the flag high, then
 * return the low 16 data bits. Returns 0 on an invalid address and
 * 0xffff (~0 truncated to u16) on timeout.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1026
/* Read-modify-write a PHY OCP register: set the bits in 'p', then
 * clear the bits in 'm'. */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	r8168_phy_ocp_write(tp, reg, (r8168_phy_ocp_read(tp, reg) | p) & ~m);
}
1034
/*
 * Write a MAC-side OCP register via OCPDR (address in the upper bits,
 * write flag set). Unlike the PHY variant there is no completion
 * polling here. Silently returns on an invalid register address.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1044
/*
 * Read a MAC-side OCP register via OCPDR: post the address (read
 * command = flag clear) and read the result back immediately — no
 * completion polling, matching the write path. Returns 0 on an
 * invalid register address; the 32-bit read is truncated to u16.
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1056
1057 #define OCP_STD_PHY_BASE 0xa400
1058
/*
 * MDIO write for 8168g: PHY registers are mapped onto the OCP space
 * at tp->ocp_base + 2*reg. Writing register 0x1f is a page select:
 * value << 4 becomes the new OCP base, 0 restores OCP_STD_PHY_BASE.
 * NOTE(review): off the standard page the register number is offset
 * by -0x10 before mapping — confirm against Realtek OCP docs.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
1071
1072 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1073 {
1074 if (tp->ocp_base != OCP_STD_PHY_BASE)
1075 reg -= 0x10;
1076
1077 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1078 }
1079
/* PHYAR busy flag: bit 31 signals GMII transaction state (high after
 * a read completes, low after a write completes). */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1086
/*
 * Classic GMII write via PHYAR: bit 31 = write command, bits 20:16 =
 * register, bits 15:0 = data. Waits for the flag to clear, then pauses
 * 20us as the hardware requires before the next MDIO command.
 */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1100
/*
 * Classic MDIO read through PHYAR (bit 31 clear = read request).
 * Returns the 16-bit register value, or ~0 on timeout.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1119
/*
 * Issue one GPHY access on early 8168DP parts: stage the command word in
 * OCPDR, trigger it via OCPAR, clear EPHY_RXER_NUM, then wait for the
 * OCPAR busy flag to drop.  The register write order is mandated by the
 * hardware.
 */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1130
1131 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1132 {
1133 r8168dp_1_mdio_access(tp, reg,
1134 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1135 }
1136
/*
 * MDIO read for early 8168DP: post the read command, give the PHY 1ms to
 * respond, then fetch the result with a second OCPAR transaction.
 * Returns ~0 on timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1150
1151 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1152
/* Clear the MDIO-access gate bit in register 0xd0 to open a PHY access. */
static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
1157
/* Set the MDIO-access gate bit in register 0xd0 again after a PHY access. */
static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
1162
/*
 * MDIO write for later 8168DP parts: the classic PHYAR write must be
 * bracketed by the start/stop gate toggles.
 */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1173
/*
 * MDIO read for later 8168DP parts: same gate-bracketed scheme as the
 * write path.
 */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1187
/* PHY register write dispatched through the chip-specific mdio ops. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1192
/* PHY register read dispatched through the chip-specific mdio ops. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1197
/* OR the bits in @value into PHY register @reg_addr (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1202
/*
 * Read-modify-write a PHY register: set the bits in @p, then clear the
 * bits in @m (a bit present in both masks ends up cleared).
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	rtl_writephy(tp, reg_addr, (rtl_readphy(tp, reg_addr) | p) & ~m);
}
1210
/* mii_if_info write hook; phy_id is ignored (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1218
/* mii_if_info read hook; phy_id is ignored (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1225
/* Completion condition for EPHYAR accesses. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1232
/*
 * Write a PCIe EPHY register through EPHYAR, then honour the 10us
 * post-completion settle delay.
 */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1244
/* Read a PCIe EPHY register through EPHYAR.  Returns ~0 on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1254
/*
 * Write an ERI (extended register interface) register: data first via
 * ERIDR, then the command with byte-enable @mask and target @type via
 * ERIAR.  @addr must be dword-aligned and @mask non-zero.
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1266
/*
 * Read a full dword from an ERI register (all byte enables set).
 * Returns ~0 on timeout.
 */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1276
1277 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1278 u32 m, int type)
1279 {
1280 u32 val;
1281
1282 val = rtl_eri_read(tp, addr, type);
1283 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1284 }
1285
/* One entry of an EXGMAC (ERI) register programming sequence. */
struct exgmac_reg {
	u16 addr;	/* ERI register address */
	u16 mask;	/* ERIAR byte-enable mask */
	u32 val;	/* value to write */
};
1291
1292 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1293 const struct exgmac_reg *r, int len)
1294 {
1295 while (len-- > 0) {
1296 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1297 r++;
1298 }
1299 }
1300
/* Completion condition for EFUSEAR accesses. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1307
/* Read one byte from the 8168D eFuse block.  Returns ~0 on timeout. */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1317
/* Snapshot the pending interrupt event bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1324
/* Acknowledge (clear) the given interrupt event bits. */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();	/* order the MMIO write before any following unlock */
}
1332
/* Mask all chip interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();	/* order the MMIO write before any following unlock */
}
1340
/* Unmask the given interrupt sources. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1347
1348 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1349 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1350 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1351
/* Unmask both the NAPI (rx/tx) events and the chip's slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1356
/*
 * Mask all interrupts and clear anything already pending.  The trailing
 * ChipCmd read flushes the posted MMIO writes to the device.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1365
/* Non-zero while a TBI reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1372
/* Non-zero while the PHY's self-clearing BMCR reset bit is still set. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1377
/* Non-zero when the TBI link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1382
/* Non-zero when the copper (XMII) link is up, per the PHY status byte. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1387
/* Kick off a TBI reset by setting the self-clearing reset bit. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1394
1395 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1396 {
1397 unsigned int val;
1398
1399 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1400 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1401 }
1402
/*
 * Apply chip-specific ERI fixups after a link change.  The magic values
 * programmed into 0x1bc/0x1dc/0x1d0 depend on the negotiated speed and
 * come from Realtek; they are selected per mac_version family.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1459
/*
 * Propagate the current hardware link state to the net stack.  With @pm
 * set, link-up cancels any scheduled runtime suspend and link-down
 * schedules one after a 5s grace period.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1479
/* Link-state check without the runtime-PM side effects. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1486
1487 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1488
/*
 * Translate the chip's Config1/3/5 wake bits into WAKE_* flags.
 * Returns 0 when PME is disabled altogether (Config1.PMEnable clear).
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1515
/* ethtool .get_wol: report supported and currently enabled wake options. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1527
/*
 * Program the requested WAKE_* options into Config1/2/3/5.  The config
 * registers are write-protected, so the whole update is bracketed by the
 * Cfg9346 unlock/lock sequence.  PME signalling lives in Config1 on old
 * chips (up to VER_17) and in Config2 afterwards.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	/* Map each WAKE_* option to its config register and enable bit. */
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1572
1573 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1574 {
1575 struct rtl8169_private *tp = netdev_priv(dev);
1576
1577 rtl_lock_work(tp);
1578
1579 if (wol->wolopts)
1580 tp->features |= RTL_FEATURE_WOL;
1581 else
1582 tp->features &= ~RTL_FEATURE_WOL;
1583 __rtl8169_set_wol(tp, wol->wolopts);
1584
1585 rtl_unlock_work(tp);
1586
1587 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1588
1589 return 0;
1590 }
1591
/* Firmware file name for the detected chip, or NULL if none is needed. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1596
/* ethtool .get_drvinfo: driver/version/bus strings plus firmware version. */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* tp->rtl_fw may be an ERR_PTR sentinel when no fw was loaded. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1611
/* ethtool .get_regs_len: size of the register dump. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1616
1617 static int rtl8169_set_speed_tbi(struct net_device *dev,
1618 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1619 {
1620 struct rtl8169_private *tp = netdev_priv(dev);
1621 void __iomem *ioaddr = tp->mmio_addr;
1622 int ret = 0;
1623 u32 reg;
1624
1625 reg = RTL_R32(TBICSR);
1626 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1627 (duplex == DUPLEX_FULL)) {
1628 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1629 } else if (autoneg == AUTONEG_ENABLE)
1630 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1631 else {
1632 netif_warn(tp, link, dev,
1633 "incorrect speed setting refused in TBI mode\n");
1634 ret = -EOPNOTSUPP;
1635 }
1636
1637 return ret;
1638 }
1639
/*
 * Speed setting for copper (XMII) mode.  Builds the MII advertisement
 * and BMCR from the requested autoneg/speed/duplex and writes them to
 * the PHY.  Returns -EINVAL for unsupported combinations (e.g. gigabit
 * advertising on Fast-Ethernet-only 8100e/8101e/8102e chips).
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard MII registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* Always advertise pause capability. */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;	/* forced gigabit is not supported */

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* Extra PHY tweaks for forced 100Mbps on the oldest gigabit macs. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1718
/*
 * Common speed-setting entry point; dispatches to the TBI or XMII
 * handler.  When gigabit autoneg is requested on a running interface,
 * (re)arm the PHY watchdog timer.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
1736
/*
 * ethtool .set_settings: stop the PHY timer first so it cannot race the
 * reconfiguration, then apply the new link parameters under the work lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1751
1752 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1753 netdev_features_t features)
1754 {
1755 struct rtl8169_private *tp = netdev_priv(dev);
1756
1757 if (dev->mtu > TD_MSS_MAX)
1758 features &= ~NETIF_F_ALL_TSO;
1759
1760 if (dev->mtu > JUMBO_1K &&
1761 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1762 features &= ~NETIF_F_IP_CSUM;
1763
1764 return features;
1765 }
1766
1767 static void __rtl8169_set_features(struct net_device *dev,
1768 netdev_features_t features)
1769 {
1770 struct rtl8169_private *tp = netdev_priv(dev);
1771 netdev_features_t changed = features ^ dev->features;
1772 void __iomem *ioaddr = tp->mmio_addr;
1773
1774 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1775 return;
1776
1777 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1778 if (features & NETIF_F_RXCSUM)
1779 tp->cp_cmd |= RxChkSum;
1780 else
1781 tp->cp_cmd &= ~RxChkSum;
1782
1783 if (dev->features & NETIF_F_HW_VLAN_RX)
1784 tp->cp_cmd |= RxVlan;
1785 else
1786 tp->cp_cmd &= ~RxVlan;
1787
1788 RTL_W16(CPlusCmd, tp->cp_cmd);
1789 RTL_R16(CPlusCmd);
1790 }
1791 if (changed & NETIF_F_RXALL) {
1792 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1793 if (features & NETIF_F_RXALL)
1794 tmp |= (AcceptErr | AcceptRunt);
1795 RTL_W32(RxConfig, tmp);
1796 }
1797 }
1798
/* ndo_set_features: serialize the hardware update with the work lock. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
1810
1811
1812 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1813 {
1814 return (vlan_tx_tag_present(skb)) ?
1815 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1816 }
1817
/*
 * Extract a hardware-stripped VLAN tag from the rx descriptor and attach
 * it to the skb.  The chip stores the tag byte-swapped, hence swab16.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
}
1825
/* ethtool get_settings for TBI (fibre) mode: always 1000/full. */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1846
/* ethtool get_settings for copper mode, delegated to the mii library. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1853
/* ethtool .get_settings: dispatch to the TBI/XMII handler under the lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1865
/* ethtool .get_regs: copy (a clamped amount of) the MMIO register space. */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
1878
/* ethtool .get_msglevel. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1885
/* ethtool .set_msglevel. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1892
/*
 * ethtool statistics names.  Order must match the data[] indices filled
 * in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1908
1909 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1910 {
1911 switch (sset) {
1912 case ETH_SS_STATS:
1913 return ARRAY_SIZE(rtl8169_gstrings);
1914 default:
1915 return -EOPNOTSUPP;
1916 }
1917 }
1918
/* Busy condition for a hardware tally-counter dump in progress. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & CounterDump;
}
1925
/*
 * Trigger a hardware dump of the tally counters into a temporary DMA
 * buffer and merge the result into tp->counters.  The DMA address is
 * programmed high half first, then the low half with the CounterDump
 * trigger bit; the low register is cleared afterwards to disarm it.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* Copy only if the dump completed; keep the stale copy otherwise. */
	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
		memcpy(&tp->counters, counters, sizeof(*counters));

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1959
/*
 * ethtool .get_ethtool_stats: refresh the hardware tally counters and
 * expose them in rtl8169_gstrings order.  ("tx_underun" is the struct
 * field's historical spelling.)
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1983
1984 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1985 {
1986 switch(stringset) {
1987 case ETH_SS_STATS:
1988 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1989 break;
1990 }
1991 }
1992
/* ethtool entry points for the driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2009
/*
 * Identify the chip from the version bits in TxConfig by walking the
 * mask/value table below.  The table order matters: more specific masks
 * must come before broader family catch-alls.  Unknown chips fall back
 * to @default_version with a notice.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168G family. */
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	/* The catch-all entry terminates the scan unconditionally. */
	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
2121
/* Debug helper: dump the detected mac_version (RTL8169_DEBUG only). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2126
/* One register/value pair of a PHY programming sequence. */
struct phy_reg {
	u16 reg;
	u16 val;
};
2131
2132 static void rtl_writephy_batch(struct rtl8169_private *tp,
2133 const struct phy_reg *regs, int len)
2134 {
2135 while (len-- > 0) {
2136 rtl_writephy(tp, regs->reg, regs->val);
2137 regs++;
2138 }
2139 }
2140
2141 #define PHY_READ 0x00000000
2142 #define PHY_DATA_OR 0x10000000
2143 #define PHY_DATA_AND 0x20000000
2144 #define PHY_BJMPN 0x30000000
2145 #define PHY_READ_EFUSE 0x40000000
2146 #define PHY_READ_MAC_BYTE 0x50000000
2147 #define PHY_WRITE_MAC_BYTE 0x60000000
2148 #define PHY_CLEAR_READCOUNT 0x70000000
2149 #define PHY_WRITE 0x80000000
2150 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2151 #define PHY_COMP_EQ_SKIPN 0xa0000000
2152 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2153 #define PHY_WRITE_PREVIOUS 0xc0000000
2154 #define PHY_SKIPN 0xd0000000
2155 #define PHY_DELAY_MS 0xe0000000
2156 #define PHY_WRITE_ERI_WORD 0xf0000000
2157
/*
 * Header of the "new style" firmware image format (selected when the
 * leading magic word is zero; see rtl_fw_format_ok()).
 */
struct fw_info {
	u32	magic;		/* 0 selects this header format */
	char	version[RTL_VER_SIZE];
	__le32	fw_start;	/* byte offset of the phy_action code */
	__le32	fw_len;		/* phy_action length, in opcodes */
	u8	chksum;		/* balances the whole file's byte sum to 0 */
} __packed;
2165
2166 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2167
/*
 * Validate a loaded firmware image and locate its phy_action opcode
 * stream.  Two formats exist: a headered one (leading magic word of 0,
 * with checksum, version and explicit code offset/length) and a raw one
 * (the whole file is opcodes).  On success fills rtl_fw->phy_action and
 * rtl_fw->version and returns true.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* All file bytes (chksum included) must sum to zero. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* Opcode area must fit between @start and end of file. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* Raw format: must be a whole number of opcodes. */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2218
/*
 * Statically verify a firmware action program before running it:
 * every opcode must be implemented and every jump/skip target must stay
 * inside the program, so that rtl_phy_write_fw() cannot walk out of
 * bounds. Returns true when the program is safe to execute.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Opcodes with no control transfer need no range check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump must not go before instruction 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May advance by two instructions. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno instructions forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		/* Not implemented by the interpreter: reject the image. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2277
2278 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2279 {
2280 struct net_device *dev = tp->dev;
2281 int rc = -EINVAL;
2282
2283 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2284 netif_err(tp, ifup, dev, "invalid firwmare\n");
2285 goto out;
2286 }
2287
2288 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2289 rc = 0;
2290 out:
2291 return rc;
2292 }
2293
/*
 * Execute a firmware action program (previously verified by
 * rtl_fw_data_ok()).
 *
 * Interpreter state:
 *  - predata: value of the most recent PHY/efuse read, modified by the
 *    OR/AND opcodes and written back by PHY_WRITE_PREVIOUS;
 *  - count: number of PHY_READs since the last PHY_CLEAR_READCOUNT,
 *    tested by PHY_READCOUNT_EQ_SKIP.
 *
 * A zero action word terminates the program early.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump; bounds guaranteed by the verifier. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next instruction when count matches. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		/* Rejected earlier by rtl_fw_data_ok() - cannot happen. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			BUG();
		}
	}
}
2372
2373 static void rtl_release_firmware(struct rtl8169_private *tp)
2374 {
2375 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2376 release_firmware(tp->rtl_fw->fw);
2377 kfree(tp->rtl_fw);
2378 }
2379 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2380 }
2381
2382 static void rtl_apply_firmware(struct rtl8169_private *tp)
2383 {
2384 struct rtl_fw *rtl_fw = tp->rtl_fw;
2385
2386 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2387 if (!IS_ERR_OR_NULL(rtl_fw))
2388 rtl_phy_write_fw(tp, rtl_fw);
2389 }
2390
2391 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2392 {
2393 if (rtl_readphy(tp, reg) != val)
2394 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2395 else
2396 rtl_apply_firmware(tp);
2397 }
2398
/*
 * PHY init sequence for the 8169s family. The register values are opaque
 * vendor magic; preserve order and values exactly.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2465
/* PHY init for the 8169sb family: a single vendor-specified write. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2476
2477 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2478 {
2479 struct pci_dev *pdev = tp->pci_dev;
2480
2481 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2482 (pdev->subsystem_device != 0xe000))
2483 return;
2484
2485 rtl_writephy(tp, 0x1f, 0x0001);
2486 rtl_writephy(tp, 0x10, 0xf01b);
2487 rtl_writephy(tp, 0x1f, 0x0000);
2488 }
2489
/*
 * PHY init for the 8169scd family: vendor register table plus a
 * board-specific quirk. Values are opaque vendor magic.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2536
/* PHY init for the 8169sce family; opaque vendor register table. */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2589
/* PHY init for the 8168bb: set bit 0 of reg 0x16, then the small table. */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2602
/* PHY init for the 8168bef; opaque vendor register table. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2613
/* PHY init for the 8168cp rev 1; opaque vendor register table. */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2626
/* PHY init for the 8168cp rev 2: two bit-set patches, then the table. */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2641
/* PHY init for the 8168c rev 1: table first, then bit patches. */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2670
/* PHY init for the 8168c rev 2: table first, then bit patches. */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2698
/* PHY init for the 8168c rev 3 (also reused for rev 4). */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2720
/* The 8168c rev 4 shares the rev 3 PHY init sequence. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2725
/*
 * PHY init for the 8168d rev 1. Sequence: base table, switching-regulator
 * tune, an efuse-dependent branch, PLL fine tuning, then conditional
 * firmware download (only if MII_EXPANSION reads 0xbf00).
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	/* Efuse byte 0x01 selects between two vendor tuning variants. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/* Walk reg 0x0d's low byte up to 0x6c if not already there. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2836
/*
 * PHY init for the 8168d rev 2. Same base table and efuse-dependent branch
 * as rev 1, but without the Rx-error regulator tune and with a different
 * firmware gate value (MII_EXPANSION must read 0xb300).
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Efuse byte 0x01 selects between two vendor tuning variants. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Walk reg 0x0d's low byte up to 0x6c if not already there. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2938
/* PHY init for the 8168d rev 3; single opaque vendor register table. */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2999
/* PHY init for the 8168d rev 4: small table plus one bit-set patch. */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
3015
/*
 * PHY init for the 8168e rev 1: unconditional firmware download, then the
 * vendor table and a series of read-modify-write tweaks. The section
 * comments below come from the vendor; preserve order exactly.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3088
/*
 * Mirror the six-byte MAC address into the EXGMAC registers at 0xe0/0xe4
 * and 0xf0/0xf4. Used as a broken-BIOS workaround by
 * rtl8168e_2_hw_phy_config().
 */
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
	/* Pack the address bytes into three little-endian 16-bit words. */
	const u16 w[] = {
		addr[0] | (addr[1] << 8),
		addr[2] | (addr[3] << 8),
		addr[4] | (addr[5] << 8)
	};
	const struct exgmac_reg e[] = {
		{ .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
		{ .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
		{ .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
		{ .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
	};

	rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
3105
/*
 * PHY init for the 8168e rev 2: firmware, vendor table, performance and
 * EEE tweaks, and finally the GigaMAC MAC-address workaround for broken
 * BIOSes. Vendor section comments preserved; order matters.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3192
/* PHY tweaks shared by all 8168f variants (and the 8411). */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3214
/*
 * PHY init for the 8168f rev 1: firmware, vendor table, the shared 8168f
 * tweaks, then a 2-pair detection improvement.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3264
/* 8168f rev 2: firmware plus the shared 8168f tweaks, no extra table. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3271
/*
 * PHY init for the 8411: firmware, the shared 8168f tweaks, then green
 * table / EEE adjustments. Unlike rtl8168f_1_hw_phy_config(), the table
 * here is written AFTER the shared tweaks. Vendor comments preserved.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3368
/*
 * PHY init for the 8168g rev 1: upload a MAC OCP patch (for GPHY reset),
 * download firmware, then configure the PHY through OCP registers.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,

		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,

		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,

		0x0000, 0x0000, 0x0000, 0x0000
	};
	u32 i;

	/* Patch code for GPHY reset */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	/* Mirror bit 8 of 0xa460 (inverted) into bit 15 of 0xbcc4. */
	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);

	/*
	 * NOTE(review): the if branch sets a bit in 0xc41a while the else
	 * branch clears a bit in 0xbcc4 - asymmetry looks like a possible
	 * copy-paste; confirm against Realtek reference code.
	 */
	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
}
3417
/* PHY setup for the RTL8102E family (RTL_GIGA_MAC_VER_07..09). */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	/* Set individual bits in PHY regs 0x11/0x19/0x10 on page 0. */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3434
/*
 * PHY setup for the RTL8105E (RTL_GIGA_MAC_VER_29/30): disable ALDPS,
 * load the firmware ram code, then apply the register table.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);	/* let the PHY leave the power-saving state */

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3460
/*
 * PHY setup for the RTL8402 (RTL_GIGA_MAC_VER_37): disable ALDPS, load
 * firmware, then program the EEE-related registers.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3477
/*
 * PHY setup for the RTL8106E (RTL_GIGA_MAC_VER_39): disable ALDPS,
 * load the ram code, then program EEE/ERI registers.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
3499
3500 static void rtl_hw_phy_config(struct net_device *dev)
3501 {
3502 struct rtl8169_private *tp = netdev_priv(dev);
3503
3504 rtl8169_print_mac_version(tp);
3505
3506 switch (tp->mac_version) {
3507 case RTL_GIGA_MAC_VER_01:
3508 break;
3509 case RTL_GIGA_MAC_VER_02:
3510 case RTL_GIGA_MAC_VER_03:
3511 rtl8169s_hw_phy_config(tp);
3512 break;
3513 case RTL_GIGA_MAC_VER_04:
3514 rtl8169sb_hw_phy_config(tp);
3515 break;
3516 case RTL_GIGA_MAC_VER_05:
3517 rtl8169scd_hw_phy_config(tp);
3518 break;
3519 case RTL_GIGA_MAC_VER_06:
3520 rtl8169sce_hw_phy_config(tp);
3521 break;
3522 case RTL_GIGA_MAC_VER_07:
3523 case RTL_GIGA_MAC_VER_08:
3524 case RTL_GIGA_MAC_VER_09:
3525 rtl8102e_hw_phy_config(tp);
3526 break;
3527 case RTL_GIGA_MAC_VER_11:
3528 rtl8168bb_hw_phy_config(tp);
3529 break;
3530 case RTL_GIGA_MAC_VER_12:
3531 rtl8168bef_hw_phy_config(tp);
3532 break;
3533 case RTL_GIGA_MAC_VER_17:
3534 rtl8168bef_hw_phy_config(tp);
3535 break;
3536 case RTL_GIGA_MAC_VER_18:
3537 rtl8168cp_1_hw_phy_config(tp);
3538 break;
3539 case RTL_GIGA_MAC_VER_19:
3540 rtl8168c_1_hw_phy_config(tp);
3541 break;
3542 case RTL_GIGA_MAC_VER_20:
3543 rtl8168c_2_hw_phy_config(tp);
3544 break;
3545 case RTL_GIGA_MAC_VER_21:
3546 rtl8168c_3_hw_phy_config(tp);
3547 break;
3548 case RTL_GIGA_MAC_VER_22:
3549 rtl8168c_4_hw_phy_config(tp);
3550 break;
3551 case RTL_GIGA_MAC_VER_23:
3552 case RTL_GIGA_MAC_VER_24:
3553 rtl8168cp_2_hw_phy_config(tp);
3554 break;
3555 case RTL_GIGA_MAC_VER_25:
3556 rtl8168d_1_hw_phy_config(tp);
3557 break;
3558 case RTL_GIGA_MAC_VER_26:
3559 rtl8168d_2_hw_phy_config(tp);
3560 break;
3561 case RTL_GIGA_MAC_VER_27:
3562 rtl8168d_3_hw_phy_config(tp);
3563 break;
3564 case RTL_GIGA_MAC_VER_28:
3565 rtl8168d_4_hw_phy_config(tp);
3566 break;
3567 case RTL_GIGA_MAC_VER_29:
3568 case RTL_GIGA_MAC_VER_30:
3569 rtl8105e_hw_phy_config(tp);
3570 break;
3571 case RTL_GIGA_MAC_VER_31:
3572 /* None. */
3573 break;
3574 case RTL_GIGA_MAC_VER_32:
3575 case RTL_GIGA_MAC_VER_33:
3576 rtl8168e_1_hw_phy_config(tp);
3577 break;
3578 case RTL_GIGA_MAC_VER_34:
3579 rtl8168e_2_hw_phy_config(tp);
3580 break;
3581 case RTL_GIGA_MAC_VER_35:
3582 rtl8168f_1_hw_phy_config(tp);
3583 break;
3584 case RTL_GIGA_MAC_VER_36:
3585 rtl8168f_2_hw_phy_config(tp);
3586 break;
3587
3588 case RTL_GIGA_MAC_VER_37:
3589 rtl8402_hw_phy_config(tp);
3590 break;
3591
3592 case RTL_GIGA_MAC_VER_38:
3593 rtl8411_hw_phy_config(tp);
3594 break;
3595
3596 case RTL_GIGA_MAC_VER_39:
3597 rtl8106e_hw_phy_config(tp);
3598 break;
3599
3600 case RTL_GIGA_MAC_VER_40:
3601 rtl8168g_1_hw_phy_config(tp);
3602 break;
3603
3604 case RTL_GIGA_MAC_VER_41:
3605 default:
3606 break;
3607 }
3608 }
3609
/*
 * Periodic PHY watchdog (runs from the deferred task): while a PHY
 * reset is still pending it re-arms quickly; once the reset completed
 * it re-resets the PHY until the link comes up.  Returns without
 * re-arming the timer when the link is OK.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		return;

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3637
3638 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3639 {
3640 if (!test_and_set_bit(flag, tp->wk.flags))
3641 schedule_work(&tp->wk.work);
3642 }
3643
3644 static void rtl8169_phy_timer(unsigned long __opaque)
3645 {
3646 struct net_device *dev = (struct net_device *)__opaque;
3647 struct rtl8169_private *tp = netdev_priv(dev);
3648
3649 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3650 }
3651
/*
 * Undo the probe-time setup: unmap MMIO, release PCI resources and
 * free the netdev.  The teardown order mirrors the acquisition order
 * in reverse -- keep it that way.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3661
/* Polling condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3666
/* Trigger a PHY reset and wait (up to 100 x 1 ms) for it to complete. */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3673
3674 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3675 {
3676 void __iomem *ioaddr = tp->mmio_addr;
3677
3678 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3679 (RTL_R8(PHYstatus) & TBI_Enable);
3680 }
3681
/*
 * Bring the PHY up at open time: apply the chip-specific config, a few
 * 8169-era PCI/MAC quirks, reset the PHY and restart autonegotiation
 * advertising every mode the chip supports.  The statement order is
 * part of the hardware bring-up sequence -- do not reorder.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	/* Old (pre-PCIe) chips also want an explicit cache line size. */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 10/100 always, 1000 only on gigabit-capable PHYs. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3717
/*
 * Program the unicast MAC address into the MAC0/MAC4 registers.
 * MAC4 (the high 2 bytes) is written before MAC0; the reads after each
 * write act as PCI posting flushes.  VER_34 additionally needs the
 * address mirrored via the EXGMAC interface.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, addr[4] | addr[5] << 8);
	RTL_R32(MAC4);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3739
3740 static int rtl_set_mac_address(struct net_device *dev, void *p)
3741 {
3742 struct rtl8169_private *tp = netdev_priv(dev);
3743 struct sockaddr *addr = p;
3744
3745 if (!is_valid_ether_addr(addr->sa_data))
3746 return -EADDRNOTAVAIL;
3747
3748 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3749
3750 rtl_rar_set(tp, dev->dev_addr);
3751
3752 return 0;
3753 }
3754
3755 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3756 {
3757 struct rtl8169_private *tp = netdev_priv(dev);
3758 struct mii_ioctl_data *data = if_mii(ifr);
3759
3760 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3761 }
3762
3763 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3764 struct mii_ioctl_data *data, int cmd)
3765 {
3766 switch (cmd) {
3767 case SIOCGMIIPHY:
3768 data->phy_id = 32; /* Internal PHY */
3769 return 0;
3770
3771 case SIOCGMIIREG:
3772 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3773 return 0;
3774
3775 case SIOCSMIIREG:
3776 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3777 return 0;
3778 }
3779 return -EOPNOTSUPP;
3780 }
3781
/* MII ioctls are meaningless on TBI (fiber) chips: always refuse. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3786
3787 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3788 {
3789 if (tp->features & RTL_FEATURE_MSI) {
3790 pci_disable_msi(pdev);
3791 tp->features &= ~RTL_FEATURE_MSI;
3792 }
3793 }
3794
/*
 * Select the MDIO accessors for this chip: the 8168DP variants and the
 * 8168G use indirect access schemes; everything else uses the classic
 * PHYAR-based r8169 accessors.
 */
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write = r8168dp_1_mdio_write;
		ops->read = r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write = r8168dp_2_mdio_write;
		ops->read = r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->write = r8168g_mdio_write;
		ops->read = r8168g_mdio_read;
		break;
	default:
		ops->write = r8169_mdio_write;
		ops->read = r8169_mdio_read;
		break;
	}
}
3820
3821 static void rtl_speed_down(struct rtl8169_private *tp)
3822 {
3823 u32 adv;
3824 int lpa;
3825
3826 rtl_writephy(tp, 0x1f, 0x0000);
3827 lpa = rtl_readphy(tp, MII_LPA);
3828
3829 if (lpa & (LPA_10HALF | LPA_10FULL))
3830 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
3831 else if (lpa & (LPA_100HALF | LPA_100FULL))
3832 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3833 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3834 else
3835 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3836 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3837 (tp->mii.supports_gmii ?
3838 ADVERTISED_1000baseT_Half |
3839 ADVERTISED_1000baseT_Full : 0);
3840
3841 rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3842 adv);
3843 }
3844
/*
 * On chips that need it, keep the receiver accepting broadcast,
 * multicast and unicast-to-us frames while suspended so Wake-on-LAN
 * packets still reach the WoL logic.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3869
3870 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3871 {
3872 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3873 return false;
3874
3875 rtl_speed_down(tp);
3876 rtl_wol_suspend_quirk(tp);
3877
3878 return true;
3879 }
3880
/* Power the 810x PHY down via the standard BMCR power-down bit. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3886
/* Power the 810x PHY up: clearing BMCR_PDOWN, re-enabling autoneg. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3892
/*
 * Full power-down path for 810x chips.  Skipped entirely when WoL is
 * armed (rtl_wol_pll_power_down handles that case).  The listed older
 * versions have no PMCH PLL-gate bit, hence the empty cases.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		/* Gate the PLL via PMCH bit 7. */
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3915
/* Reverse of r810x_pll_power_down: PHY up, then ungate the PLL. */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* No PMCH PLL gate on these versions. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3935
/*
 * Power the 8168 PHY up.  Pre-VER_32 chips additionally need PHY reg
 * 0x0e cleared (vendor power-saving control) before BMCR is touched.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3962
/*
 * Power the 8168 PHY down.  VER_32/33 keep autoneg enabled alongside
 * power-down; older chips first set PHY reg 0x0e to 0x0200 and then
 * deliberately fall through to the plain BMCR_PDOWN write.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3993
/*
 * Full power-down path for 8168 chips.  Bails out early when the DASH
 * management agent (8168DP) or ASF is active -- those must keep the
 * NIC alive -- and when WoL handling took over.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	/* VER_32/33 want this EPHY magic before powering down. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		/* Gate the PLL via PMCH bit 7. */
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4032
/* Reverse of r8168_pll_power_down: ungate the PLL, then wake the PHY. */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
4051
/* Invoke an optional chip hook; a NULL hook means "nothing to do". */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4058
/* Run the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4063
/* Run the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4068
/*
 * Select the PLL power management hooks per chip family: r810x-style,
 * r8168-style, or none (old 8169 parts and unknown versions).
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down = r810x_pll_power_down;
		ops->up = r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down = r8168_pll_power_down;
		ops->up = r8168_pll_power_up;
		break;

	default:
		ops->down = NULL;
		ops->up = NULL;
		break;
	}
}
4120
/*
 * Program the base RxConfig value (FIFO threshold / interrupt batching
 * / DMA burst) appropriate for the chip generation; the accept bits
 * are set separately by rtl_set_rx_mode().
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4157
/* Reset the TX/RX ring cursors to the start of the descriptor rings. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
4162
/* Enable jumbo frames via the chip hook, under the Cfg9346 unlock. */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4171
/* Disable jumbo frames via the chip hook, under the Cfg9346 unlock. */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4180
/* 8168C jumbo on: set both jumbo bits, shrink PCIe max read request. */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4189
/* 8168C jumbo off: clear jumbo bits, restore PCIe max read request. */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4198
/* 8168DP jumbo on: a single Config3 bit is enough on this family. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
4205
/* 8168DP jumbo off. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
4212
/* 8168E jumbo on: raise the TX size limit and set the jumbo bits. */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4222
/* 8168E jumbo off: restore the normal TX size limit and PCIe MRRS. */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4232
/* 8168B rev0 jumbo on: only the PCIe read-request/no-snoop tweak. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4238
/* 8168B rev0 jumbo off: restore the default read-request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4244
/* 8168B rev1 jumbo on: rev0 tweak plus a Config4 bit. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
4253
/* 8168B rev1 jumbo off: rev0 restore plus clearing the Config4 bit. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4262
/*
 * Select the jumbo-frame enable/disable hooks per chip family.  NULL
 * hooks mean the chip either needs no action (8169) or does not
 * support jumbo frames at all (810x, 8168G).
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable = r8168b_0_hw_jumbo_disable;
		ops->enable = r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable = r8168b_1_hw_jumbo_disable;
		ops->enable = r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable = r8168c_hw_jumbo_disable;
		ops->enable = r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable = r8168dp_hw_jumbo_disable;
		ops->enable = r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable = r8168e_hw_jumbo_disable;
		ops->enable = r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	default:
		ops->disable = NULL;
		ops->enable = NULL;
		break;
	}
}
4314
/* Polling condition: true while the chip's soft reset is in progress. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4321
/* Issue a chip soft reset and wait (100 x 100 us) for it to clear. */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4330
/*
 * Load and validate the firmware patch for this chip into tp->rtl_fw.
 * Any failure is non-fatal: a warning is logged and tp->rtl_fw is set
 * to NULL so the driver runs without the patch.  Resources are torn
 * down via the standard goto-cleanup chain.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	/* NULL here means "this chip does not use a firmware patch". */
	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
4368
/*
 * Fetch the firmware only if not already cached; tp->rtl_fw holds an
 * ERR_PTR sentinel until the first load attempt.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
4374
/* Stop packet reception by clearing all RxConfig accept bits. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4381
/* Polling condition: true while the normal-priority TX queue is busy. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4388
/* Polling condition: true once the TX FIFO reports empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4395
/*
 * Quiesce and reset the chip: mask interrupts, close the receiver,
 * drain the transmitter using the method each family requires, then
 * issue the soft reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* 8168DP: wait for the NPQ bit to clear. */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Newer chips: request stop and poll for TX FIFO empty. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		/* Everything else: request stop and give it a fixed delay. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4425
/* Program the TX DMA burst size and interframe gap into TxConfig. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4434
/* Run the chip-specific hw_start hook, then unmask all interrupts. */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4443
/*
 * Tell the chip where the TX/RX descriptor rings live in DMA space.
 * The high/low write order is intentional -- see the comment below.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4457
/*
 * Read CPlusCmd and write the same value back (a posting/latch access
 * the hardware wants before the register is modified); returns the
 * value read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4466
/* Program the RX size filter just above the buffer size. */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4472
/*
 * Write an undocumented per-chip value to register 0x7c, selected by
 * MAC version and the PCI bus clock (33 vs 66 MHz) read from Config2.
 * Only the 8110SCd/SCe (VER_05/06) need this; other versions match no
 * table entry and nothing is written.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
	static const struct rtl_cfg2_info {
		u32 mac_version;
		u32 clk;
		u32 val;
	} cfg2_info [] = {
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
	};
	const struct rtl_cfg2_info *p = cfg2_info;
	unsigned int i;
	u32 clk;

	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
		if ((p->mac_version == mac_version) && (p->clk == clk)) {
			RTL_W32(0x7c, p->val);
			break;
		}
	}
}
4497
/*
 * ndo_set_rx_mode handler: program the accept bits and the 64-bit
 * multicast hash filter according to the interface flags and the
 * multicast list.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into one of 64 filter bits (CRC-based). */
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Post-8169 chips expect the hash words byte-swapped and swapped. */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* VER_35 quirk: the hash filter is unreliable, accept everything. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4550
/*
 * hw_start for the 8169 family: bring the MAC out of reset into an
 * operating state.  The register write sequence (and the version-
 * dependent placement of the CmdTxEnb|CmdRxEnb write) is part of the
 * hardware bring-up contract -- do not reorder.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* The earliest versions enable TX/RX before configuring them... */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* ...while later versions enable TX/RX only after configuration. */
	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4622
4623 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4624 {
4625 if (tp->csi_ops.write)
4626 tp->csi_ops.write(tp, addr, value);
4627 }
4628
4629 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4630 {
4631 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4632 }
4633
/*
 * Read-modify-write CSI register 0x070c: keep the low 24 bits and OR in
 * @bits into the top byte (callers pass 0x17000000 or 0x27000000).
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | bits);
}
4641
/*
 * Convenience wrappers selecting one of the two magic CSI access values.
 * The meaning of 0x17 vs 0x27 in the top byte is undocumented vendor
 * magic; which one a chip needs is fixed per MAC version.
 */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}

static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4651
/*
 * Poll condition for CSI transfers: returns the CSIAR busy/completion
 * flag.  Writers wait for it to go low (write accepted), readers wait
 * for it to go high (data latched in CSIDR).
 */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4658
/*
 * Default CSI accessors: latch the data in CSIDR, then kick the
 * transfer by writing the command/address to CSIAR and poll for
 * completion (up to 100 * 10 us).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Data must be in CSIDR before the command is issued. */
	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}

/* Returns the value read, or ~0 if the transfer timed out. */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4680
/*
 * 8402/8411 CSI accessors.  Identical to the r8169_csi_* pair except
 * that the access is additionally tagged with CSIAR_FUNC_NIC to select
 * the NIC function of the multi-function device.
 */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}

/* Returns the value read, or ~0 if the transfer timed out. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4703
4704 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4705 {
4706 struct csi_ops *ops = &tp->csi_ops;
4707
4708 switch (tp->mac_version) {
4709 case RTL_GIGA_MAC_VER_01:
4710 case RTL_GIGA_MAC_VER_02:
4711 case RTL_GIGA_MAC_VER_03:
4712 case RTL_GIGA_MAC_VER_04:
4713 case RTL_GIGA_MAC_VER_05:
4714 case RTL_GIGA_MAC_VER_06:
4715 case RTL_GIGA_MAC_VER_10:
4716 case RTL_GIGA_MAC_VER_11:
4717 case RTL_GIGA_MAC_VER_12:
4718 case RTL_GIGA_MAC_VER_13:
4719 case RTL_GIGA_MAC_VER_14:
4720 case RTL_GIGA_MAC_VER_15:
4721 case RTL_GIGA_MAC_VER_16:
4722 case RTL_GIGA_MAC_VER_17:
4723 ops->write = NULL;
4724 ops->read = NULL;
4725 break;
4726
4727 case RTL_GIGA_MAC_VER_37:
4728 case RTL_GIGA_MAC_VER_38:
4729 ops->write = r8402_csi_write;
4730 ops->read = r8402_csi_read;
4731 break;
4732
4733 default:
4734 ops->write = r8169_csi_write;
4735 ops->read = r8169_csi_read;
4736 break;
4737 }
4738 }
4739
/*
 * One entry of an ephy fixup table.  rtl_ephy_init() clears @mask and
 * then sets @bits in ephy register @offset.  (Note: the open-coded loop
 * in rtl_hw_start_8168d_4() interprets @mask as a *keep* mask instead.)
 */
struct ephy_info {
	unsigned int offset;	/* ephy register index */
	u16 mask;		/* bits cleared before setting */
	u16 bits;		/* bits set afterwards */
};
4745
4746 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4747 int len)
4748 {
4749 u16 w;
4750
4751 while (len-- > 0) {
4752 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4753 rtl_ephy_write(tp, e->offset, w);
4754 e++;
4755 }
4756 }
4757
/* Clear/set the PCIe CLKREQ# enable bit in the device's Link Control
 * register.  Several chip bring-up paths toggle this around their
 * register programming.
 */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}

static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
4769
/* Debug/test C+ command bits that must be cleared on 8168 chips. */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4780
/* 8168B (VER_11) bring-up: disable beacon, clear the debug C+ bits and,
 * for standard MTU, apply the TX performance tweak with no-snoop.
 */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
					 PCI_EXP_DEVCTL_NOSNOOP_EN);
	}
}
4795
/* 8168B rev E/F (VER_12/17): 8168bb setup plus TX size limit and a
 * Config4 bit0 clear (undocumented vendor quirk).
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4806
/* Common tail for the 8168C/CP bring-up variants: speed-down enable,
 * beacon off, optional TX tweak, clock request off, debug bits cleared.
 */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4823
/* 8168CP rev 1 (VER_18): ephy fixups (vendor-supplied values) on top of
 * the common 8168cp setup.
 */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4840
/* 8168CP rev 2 (VER_23): like the common setup, but without the
 * Speed_down/clock-request handling.
 */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4855
/* 8168CP rev 3 (VER_24): as rev 2 plus an undocumented DBG_REG poke and
 * an explicit TX packet size limit.
 */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4875
/* 8168C rev 1 (VER_19): NAK workaround bits in DBG_REG plus ephy fixups,
 * then the common 8168cp tail.
 */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
4893
/* 8168C rev 2 (VER_20): smaller ephy fixup table, same common tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
4907
/* 8168C rev 3 (VER_21): identical programming to rev 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}

/* 8168C rev 4 (VER_22): no ephy fixups needed, just the common tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4919
/* 8168D (VER_25..27) bring-up: clock request off, TX limits and the
 * usual C+ debug bit clearing.
 */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4936
/* 8168DP (VER_31) bring-up: uses CSI access value 1 and leaves the C+
 * command register untouched, unlike rtl_hw_start_8168d().
 */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4951
4952 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4953 {
4954 void __iomem *ioaddr = tp->mmio_addr;
4955 struct pci_dev *pdev = tp->pci_dev;
4956 static const struct ephy_info e_info_8168d_4[] = {
4957 { 0x0b, ~0, 0x48 },
4958 { 0x19, 0x20, 0x50 },
4959 { 0x0c, ~0, 0x20 }
4960 };
4961 int i;
4962
4963 rtl_csi_access_enable_1(tp);
4964
4965 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4966
4967 RTL_W8(MaxTxPacketSize, TxPacketMax);
4968
4969 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4970 const struct ephy_info *e = e_info_8168d_4 + i;
4971 u16 w;
4972
4973 w = rtl_ephy_read(tp, e->offset);
4974 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4975 }
4976
4977 rtl_enable_clock_request(pdev);
4978 }
4979
/* 8168E rev 1 (VER_32/33): large ephy fixup table, then TX FIFO pointer
 * reset via TXPLA_RST pulse and SPI disable.
 */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5017
/* 8168E rev 2 (VER_34): ephy fixups plus a series of extended-GMAC
 * (ERI) register writes; the addresses/values are vendor-supplied and
 * undocumented.  Also enables the automatic TX FIFO and tweaks the EEE
 * LED blink rate.
 */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5057
/* Common 8168F bring-up shared by rtl_hw_start_8168f_1() and
 * rtl_hw_start_8411().  All ERI values are vendor magic.
 */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	/* Pulse ERI 0xdc bit0 low then high. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5088
/* 8168F (VER_35/36): common 8168f setup plus F-specific ephy fixups and
 * EEE LED frequency adjustment.
 */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5108
5109 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5110 {
5111 static const struct ephy_info e_info_8168f_1[] = {
5112 { 0x06, 0x00c0, 0x0020 },
5113 { 0x0f, 0xffff, 0x5200 },
5114 { 0x1e, 0x0000, 0x4000 },
5115 { 0x19, 0x0000, 0x0224 }
5116 };
5117
5118 rtl_hw_start_8168f(tp);
5119
5120 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5121
5122 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5123 }
5124
/* 8168G (VER_40/41): ERI programming, RXDV gating off and early-size TX
 * limit.  ERI addresses/values are vendor magic.
 */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Pulse ERI 0xdc bit0 low then high. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
5154
/*
 * Top-level bring-up for all 8168 variants: common register setup, then
 * dispatch to the per-revision routine, then enable Tx/Rx and re-lock
 * the config registers.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* PCI commit (posted-write flush) before the per-chip setup. */
	RTL_R8(IntrMask);

	/* Per-revision quirks and fixups. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		/* Unknown revision: proceed with only the common setup. */
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5273
/* Debug/test C+ command bits that must be cleared on 810x chips. */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5284
/* 8102E rev 1 (VER_07): NAK fix, Config1 LED/power setup and a full
 * ephy table write (all values vendor-supplied).
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck, drop LEDS0 (vendor quirk). */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5317
/* 8102E rev 2 (VER_09): minimal variant without DBG_REG/LED handling. */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
5330
/* 8102E rev 3 (VER_08): rev 2 setup plus one ephy fixup. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5337
/* 8105E rev 1 (VER_29): ASPM exit trigger, tally counter tweak, OOB MCU
 * setup and ephy fixups.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5363
/* 8105E rev 2 (VER_30): rev 1 setup plus ephy 0x1e bit15. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5369
/* 8402 (VER_37): ASPM exit trigger, auto TX FIFO, ephy fixups and a
 * block of vendor-supplied ERI writes.
 */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	/* Pulse ERI 0xdc bit0 low then high. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5398
/* 8106 (VER_39): ASPM exit trigger, LAN-disable/early-tally bits and
 * OOB MCU setup.  Note PFM_EN is cleared here, unlike on 8105E.
 */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5410
/*
 * Top-level bring-up for the 810x family: per-revision quirks first
 * (inside the Cfg9346 unlock window), then the common descriptor/RX
 * mode setup.  Revisions without a case fall through to the common
 * setup only.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* VER_30+ cannot report RxFIFO overflow reliably. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* PCI commit (posted-write flush). */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5478
5479 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5480 {
5481 struct rtl8169_private *tp = netdev_priv(dev);
5482
5483 if (new_mtu < ETH_ZLEN ||
5484 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5485 return -EINVAL;
5486
5487 if (new_mtu > ETH_DATA_LEN)
5488 rtl_hw_jumbo_enable(tp);
5489 else
5490 rtl_hw_jumbo_disable(tp);
5491
5492 dev->mtu = new_mtu;
5493 netdev_update_features(dev);
5494
5495 return 0;
5496 }
5497
/* Park an RX descriptor: clear DescOwn so the NIC will not use it, and
 * poison the address with a recognizable pattern for debugging.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5503
/* Unmap and free one RX buffer, NULL its slot and park its descriptor. */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5514
/* Hand an RX descriptor (back) to the NIC: set DescOwn and the buffer
 * size while preserving the RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}

/* Fill in the DMA address, then transfer ownership.  The wmb() ensures
 * the address is globally visible before DescOwn is set — do not
 * reorder these two steps.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5529
/* Round a buffer pointer up to the next 16-byte boundary (RX buffers
 * must be 16-byte aligned for the NIC).
 */
static inline void *rtl8169_align(void *data)
{
	return (void *)ALIGN((long)data, 16);
}
5534
/*
 * Allocate and DMA-map one RX buffer (NUMA-node aware) and hand its
 * descriptor to the NIC.  Returns the raw buffer pointer or NULL on
 * failure.
 *
 * NOTE(review): the declared return type is struct sk_buff * but the
 * function actually returns the raw kmalloc'd buffer, which callers
 * store in void * slots (Rx_databuff) — historical quirk; confirm
 * before changing the signature.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* Retry with slack if the allocation is not 16-byte aligned. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5570
5571 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5572 {
5573 unsigned int i;
5574
5575 for (i = 0; i < NUM_RX_DESC; i++) {
5576 if (tp->Rx_databuff[i]) {
5577 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5578 tp->RxDescArray + i);
5579 }
5580 }
5581 }
5582
/* Set RingEnd so the NIC wraps back to the first descriptor here. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5587
/*
 * Populate every empty RX ring slot with a freshly allocated, mapped
 * buffer and mark the last descriptor as ring end.  On allocation
 * failure the whole ring is torn down again; returns 0 or -ENOMEM.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already populated (partial refill). */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
5613
/* Reset ring indexes, zero the TX/RX bookkeeping arrays and fill the RX
 * ring.  Returns 0 or -ENOMEM from rtl8169_rx_fill().
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
5625
/* Unmap one TX buffer and scrub its descriptor and bookkeeping entry so
 * the slot reads as free (len == 0).
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5638
/*
 * Drop @n TX ring entries starting at @start (modulo ring size):
 * unmap each in-flight buffer and, for the entry carrying the skb
 * (last fragment), free it and count it as a dropped packet.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5662
/* Drop the entire TX ring and reset the producer/consumer indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
5668
/*
 * Full device reset path (run from the slow-work task): quiesce NAPI
 * and the TX queue, reset the chip, reclaim both rings, then restart
 * the hardware and re-check link state.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for in-flight softirq/RCU users before touching the rings. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Give all RX descriptors back to the NIC (buffers are kept). */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5691
/* ndo_tx_timeout hook: defer a full reset to the slow-work task. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5698
/*
 * Map and queue the paged fragments of @skb into the TX ring, starting
 * after tp->cur_tx.  The first (linear) part is handled by the caller.
 * Returns the number of fragments queued, or -EIO if a DMA mapping
 * fails (already-mapped fragments are unwound).  The skb pointer and
 * LastFrag flag are attached to the final fragment's entry.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		/* RingEnd is set only on the ring's final descriptor. */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind the fragments mapped so far (not the linear part). */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5749
/*
 * Set the TSO or hardware checksum bits in @opts for @skb.  The descriptor
 * layout differs per txd_version, so tx_desc_info supplies which opts word
 * (opts_offset) and which bit positions to use.
 *
 * NOTE(review): only the IPv4 header is examined here (ip_hdr/ip->protocol);
 * presumably non-IPv4 CHECKSUM_PARTIAL packets do not reach this driver -
 * confirm against the advertised feature flags.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
5771
5772 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5773 struct net_device *dev)
5774 {
5775 struct rtl8169_private *tp = netdev_priv(dev);
5776 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5777 struct TxDesc *txd = tp->TxDescArray + entry;
5778 void __iomem *ioaddr = tp->mmio_addr;
5779 struct device *d = &tp->pci_dev->dev;
5780 dma_addr_t mapping;
5781 u32 status, len;
5782 u32 opts[2];
5783 int frags;
5784
5785 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5786 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5787 goto err_stop_0;
5788 }
5789
5790 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5791 goto err_stop_0;
5792
5793 len = skb_headlen(skb);
5794 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5795 if (unlikely(dma_mapping_error(d, mapping))) {
5796 if (net_ratelimit())
5797 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5798 goto err_dma_0;
5799 }
5800
5801 tp->tx_skb[entry].len = len;
5802 txd->addr = cpu_to_le64(mapping);
5803
5804 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5805 opts[0] = DescOwn;
5806
5807 rtl8169_tso_csum(tp, skb, opts);
5808
5809 frags = rtl8169_xmit_frags(tp, skb, opts);
5810 if (frags < 0)
5811 goto err_dma_1;
5812 else if (frags)
5813 opts[0] |= FirstFrag;
5814 else {
5815 opts[0] |= FirstFrag | LastFrag;
5816 tp->tx_skb[entry].skb = skb;
5817 }
5818
5819 txd->opts2 = cpu_to_le32(opts[1]);
5820
5821 skb_tx_timestamp(skb);
5822
5823 wmb();
5824
5825 /* Anti gcc 2.95.3 bugware (sic) */
5826 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5827 txd->opts1 = cpu_to_le32(status);
5828
5829 tp->cur_tx += frags + 1;
5830
5831 wmb();
5832
5833 RTL_W8(TxPoll, NPQ);
5834
5835 mmiowb();
5836
5837 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5838 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5839 * not miss a ring update when it notices a stopped queue.
5840 */
5841 smp_wmb();
5842 netif_stop_queue(dev);
5843 /* Sync with rtl_tx:
5844 * - publish queue status and cur_tx ring index (write barrier)
5845 * - refresh dirty_tx ring index (read barrier).
5846 * May the current thread have a pessimistic view of the ring
5847 * status and forget to wake up queue, a racing rtl_tx thread
5848 * can't.
5849 */
5850 smp_mb();
5851 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5852 netif_wake_queue(dev);
5853 }
5854
5855 return NETDEV_TX_OK;
5856
5857 err_dma_1:
5858 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5859 err_dma_0:
5860 dev_kfree_skb(skb);
5861 dev->stats.tx_dropped++;
5862 return NETDEV_TX_OK;
5863
5864 err_stop_0:
5865 netif_stop_queue(dev);
5866 dev->stats.tx_dropped++;
5867 return NETDEV_TX_BUSY;
5868 }
5869
/*
 * Handle a SYSErr/PCI-error event: log the PCI command/status registers,
 * clear the sticky error bits, work around the boot-time DAC problem and
 * schedule a full chip reset.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI status bits are write-one-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5916
/*
 * Reclaim completed Tx descriptors (NAPI context).  Walks the ring from
 * dirty_tx up to cur_tx, unmapping buffers the chip has released (DescOwn
 * clear), accounting completed packets and waking the queue if it was
 * stopped for lack of descriptors.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	/* Read dirty_tx before cur_tx; pairs with the xmit-side barriers. */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		/* The skb is attached to (and freed with) the last fragment. */
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5976
5977 static inline int rtl8169_fragmented_frame(u32 status)
5978 {
5979 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5980 }
5981
5982 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5983 {
5984 u32 status = opts1 & RxProtoMask;
5985
5986 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5987 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5988 skb->ip_summed = CHECKSUM_UNNECESSARY;
5989 else
5990 skb_checksum_none_assert(skb);
5991 }
5992
/*
 * Copy a received frame out of the (persistently mapped) Rx buffer into a
 * freshly allocated skb, bracketing the copy with DMA sync calls so the
 * CPU sees the device's writes and the buffer can be handed straight back
 * to the chip.  Returns NULL on allocation failure.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					    struct rtl8169_private *tp,
					    int pkt_size,
					    dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	/* Give the buffer back to the device even if the copy was skipped. */
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
6011
/*
 * Receive up to @budget frames (NAPI context).  Each good frame is copied
 * out of its ring buffer, checksum-verified, vlan-tagged and handed to
 * napi_gro_receive(); the descriptor is then returned to the chip.
 * Returns the number of descriptors processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		/* DescOwn set: the chip has not filled this entry yet. */
		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* With RXALL, still deliver runt/CRC-error frames. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		desc->opts2 = 0;
		/* Publish opts2 before handing ownership back to the NIC. */
		wmb();
		rtl8169_mark_to_asic(desc, rx_buf_sz);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
6099
/*
 * Hard interrupt handler.  Does no event processing itself: if any NAPI
 * or slow event bit is pending it masks further interrupts and kicks
 * NAPI, which does the real work in rtl8169_poll().  A raw status of
 * 0xffff means the device is gone (e.g. hot-unplugged PCI read).
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);
	if (status && status != 0xffff) {
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
6119
6120 /*
6121 * Workqueue context.
6122 */
6123 static void rtl_slow_event_work(struct rtl8169_private *tp)
6124 {
6125 struct net_device *dev = tp->dev;
6126 u16 status;
6127
6128 status = rtl_get_events(tp) & tp->event_slow;
6129 rtl_ack_events(tp, status);
6130
6131 if (unlikely(status & RxFIFOOver)) {
6132 switch (tp->mac_version) {
6133 /* Work around for rx fifo overflow */
6134 case RTL_GIGA_MAC_VER_11:
6135 netif_stop_queue(dev);
6136 /* XXX - Hack alert. See rtl_task(). */
6137 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6138 default:
6139 break;
6140 }
6141 }
6142
6143 if (unlikely(status & SYSErr))
6144 rtl8169_pcierr_interrupt(dev);
6145
6146 if (status & LinkChg)
6147 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6148
6149 rtl_irq_enable_all(tp);
6150 }
6151
/*
 * Single workqueue entry point.  Runs, in priority order, each action
 * whose pending bit is set in tp->wk.flags, under the work mutex and only
 * while the interface is running and tasks are enabled.
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		/* test_and_clear_bit makes each request fire exactly once. */
		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
6185
/*
 * NAPI poll handler.  Processes Rx and Tx completions directly; any slow
 * event is left unacknowledged, removed from the re-enable mask and
 * deferred to rtl_slow_event_work() so the work queue can handle it.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Ack fast events only; slow ones are acked by the work queue. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		/* Keep the slow sources masked until the work queue ran. */
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
6218
/*
 * Fold the chip's 24-bit RxMissed counter into the netdev stats and reset
 * it.  Only the early (8169-era, <= MAC_VER_06) chips expose this
 * register; later chips return without touching hardware.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
6229
/*
 * Bring the interface down: stop the timer, NAPI and the Tx queue, reset
 * the chip, drain both rings and power the PLL down.  Caller holds the
 * work mutex (see rtl8169_close()).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6257
/*
 * ndo_stop handler.  Disables the deferred tasks, tears the device down,
 * releases the IRQ and frees the descriptor rings.  Runtime PM is held
 * across the sequence so the chip stays accessible.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Stop rtl_task() from acting on any still-pending flags. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	/* NULL rings also gate the runtime-PM paths (see runtime_suspend). */
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6287
6288 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with interrupts unavailable. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
6295 #endif
6296
/*
 * ndo_open handler.  Allocates the descriptor rings, populates the Rx
 * ring, loads firmware, requests the IRQ and starts the hardware.  On
 * failure, unwinds in reverse order via the numbered error labels.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6376
/*
 * ndo_get_stats64 handler.  Rx/Tx packet and byte counters are read under
 * their u64_stats seqcount retry loops (they are updated from NAPI/xmit
 * context); the error counters come straight from dev->stats.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	/* Fold in the hardware RxMissed counter while the chip is up. */
	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
6410
/*
 * Common quiesce path for system/runtime suspend and shutdown: detach the
 * netdev, stop NAPI, disable deferred tasks and power the PLL down.
 * No-op when the interface is not running.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6428
6429 #ifdef CONFIG_PM
6430
/* System suspend callback (also used for freeze/poweroff). */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6440
/*
 * Common resume tail: reattach the netdev, power the PLL up, re-enable
 * NAPI and the deferred tasks, then schedule a chip reset to restore the
 * hardware state.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6456
/* System resume callback: re-init the PHY, then resume if interface was up. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6470
/*
 * Runtime suspend: save the current WoL configuration, arm wake-on-any
 * so link changes can wake the device, then quiesce.  A NULL TxDescArray
 * means the interface is closed and there is nothing to do.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
6489
/*
 * Runtime resume: restore the WoL options saved by runtime_suspend,
 * re-init the PHY and bring the interface back up.  No-op while the
 * interface is closed (TxDescArray == NULL).
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6510
6511 static int rtl8169_runtime_idle(struct device *device)
6512 {
6513 struct pci_dev *pdev = to_pci_dev(device);
6514 struct net_device *dev = pci_get_drvdata(pdev);
6515 struct rtl8169_private *tp = netdev_priv(dev);
6516
6517 return tp->TxDescArray ? -EBUSY : 0;
6518 }
6519
/* System sleep and runtime PM callbacks, wired up via RTL8169_PM_OPS. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
6531
6532 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6533
6534 #else /* !CONFIG_PM */
6535
6536 #define RTL8169_PM_OPS NULL
6537
6538 #endif /* !CONFIG_PM */
6539
/*
 * Shutdown-time WoL quirk: on 8168b-class chips Wake-on-LAN only works
 * while the receiver is enabled, so re-enable CmdRxEnb (with bus
 * mastering off) just before powering down.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6559
/*
 * PCI shutdown handler.  Quiesces the device, restores the permanent MAC
 * address and, when the system is powering off, arms WoL (if configured)
 * and drops the device into D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6587
/*
 * PCI remove handler: stop the firmware-driven chips, flush the work
 * item, unregister the netdev and release every resource acquired in
 * rtl_init_one().
 */
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* These DASH-capable chips need an explicit driver-stop handshake. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	cancel_work_sync(&tp->wk.work);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Balance the runtime-PM reference taken at probe for wake devices. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
6617
/* net_device_ops shared by every chip variant this driver handles. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6636
/*
 * Per-family configuration table, indexed by the RTL_CFG_* value stored
 * in the PCI id table's driver_data: 8169 (CFG_0), 8168 (CFG_1) and
 * 8101/8102 (CFG_2).  default_ver is the fallback mac_version used when
 * chip identification fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6671
/*
 * Try to enable MSI for MSI-capable configurations, falling back to INTx.
 * Returns RTL_FEATURE_MSI on success, 0 otherwise.
 * Cfg9346_Unlock assumed (Config2 is written on the old chips below).
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	/* Only the early chips need the MSIEnable bit mirrored in Config2. */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6693
/* Poll condition: MCU reports the link list is ready (LINK_LIST_RDY). */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6700
/* Poll condition: both Rx and Tx paths report empty in the MCU register. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6707
/*
 * One-time hardware init for the 8168g family: gate RXDV, wait for the
 * Tx config and Rx/Tx paths to drain, take the MCU out of OOB mode and
 * toggle bits 14/15 of OCP register 0xe8de, waiting for the link list to
 * come ready after each step.  Each wait gives up silently on timeout,
 * leaving the chip partially initialized.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
6741
/*
 * Dispatch chip-specific one-time initialization; currently only the
 * 8168g family (MAC_VER_40/41) needs any.
 */
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_init_8168g(tp);
		break;

	default:
		break;
	}
}
6754
6755 static int
6756 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6757 {
6758 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6759 const unsigned int region = cfg->region;
6760 struct rtl8169_private *tp;
6761 struct mii_if_info *mii;
6762 struct net_device *dev;
6763 void __iomem *ioaddr;
6764 int chipset, i;
6765 int rc;
6766
6767 if (netif_msg_drv(&debug)) {
6768 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6769 MODULENAME, RTL8169_VERSION);
6770 }
6771
6772 dev = alloc_etherdev(sizeof (*tp));
6773 if (!dev) {
6774 rc = -ENOMEM;
6775 goto out;
6776 }
6777
6778 SET_NETDEV_DEV(dev, &pdev->dev);
6779 dev->netdev_ops = &rtl_netdev_ops;
6780 tp = netdev_priv(dev);
6781 tp->dev = dev;
6782 tp->pci_dev = pdev;
6783 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6784
6785 mii = &tp->mii;
6786 mii->dev = dev;
6787 mii->mdio_read = rtl_mdio_read;
6788 mii->mdio_write = rtl_mdio_write;
6789 mii->phy_id_mask = 0x1f;
6790 mii->reg_num_mask = 0x1f;
6791 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6792
6793 /* disable ASPM completely as that cause random device stop working
6794 * problems as well as full system hangs for some PCIe devices users */
6795 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6796 PCIE_LINK_STATE_CLKPM);
6797
6798 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6799 rc = pci_enable_device(pdev);
6800 if (rc < 0) {
6801 netif_err(tp, probe, dev, "enable failure\n");
6802 goto err_out_free_dev_1;
6803 }
6804
6805 if (pci_set_mwi(pdev) < 0)
6806 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6807
6808 /* make sure PCI base addr 1 is MMIO */
6809 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6810 netif_err(tp, probe, dev,
6811 "region #%d not an MMIO resource, aborting\n",
6812 region);
6813 rc = -ENODEV;
6814 goto err_out_mwi_2;
6815 }
6816
6817 /* check for weird/broken PCI region reporting */
6818 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6819 netif_err(tp, probe, dev,
6820 "Invalid PCI region size(s), aborting\n");
6821 rc = -ENODEV;
6822 goto err_out_mwi_2;
6823 }
6824
6825 rc = pci_request_regions(pdev, MODULENAME);
6826 if (rc < 0) {
6827 netif_err(tp, probe, dev, "could not request regions\n");
6828 goto err_out_mwi_2;
6829 }
6830
6831 tp->cp_cmd = RxChkSum;
6832
6833 if ((sizeof(dma_addr_t) > 4) &&
6834 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6835 tp->cp_cmd |= PCIDAC;
6836 dev->features |= NETIF_F_HIGHDMA;
6837 } else {
6838 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6839 if (rc < 0) {
6840 netif_err(tp, probe, dev, "DMA configuration failed\n");
6841 goto err_out_free_res_3;
6842 }
6843 }
6844
6845 /* ioremap MMIO region */
6846 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6847 if (!ioaddr) {
6848 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6849 rc = -EIO;
6850 goto err_out_free_res_3;
6851 }
6852 tp->mmio_addr = ioaddr;
6853
6854 if (!pci_is_pcie(pdev))
6855 netif_info(tp, probe, dev, "not PCI Express\n");
6856
6857 /* Identify chip attached to board */
6858 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6859
6860 rtl_init_rxcfg(tp);
6861
6862 rtl_irq_disable(tp);
6863
6864 rtl_hw_initialize(tp);
6865
6866 rtl_hw_reset(tp);
6867
6868 rtl_ack_events(tp, 0xffff);
6869
6870 pci_set_master(pdev);
6871
6872 /*
6873 * Pretend we are using VLANs; This bypasses a nasty bug where
6874 * Interrupts stop flowing on high load on 8110SCd controllers.
6875 */
6876 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6877 tp->cp_cmd |= RxVlan;
6878
6879 rtl_init_mdio_ops(tp);
6880 rtl_init_pll_power_ops(tp);
6881 rtl_init_jumbo_ops(tp);
6882 rtl_init_csi_ops(tp);
6883
6884 rtl8169_print_mac_version(tp);
6885
6886 chipset = tp->mac_version;
6887 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6888
6889 RTL_W8(Cfg9346, Cfg9346_Unlock);
6890 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6891 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6892 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6893 tp->features |= RTL_FEATURE_WOL;
6894 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6895 tp->features |= RTL_FEATURE_WOL;
6896 tp->features |= rtl_try_msi(tp, cfg);
6897 RTL_W8(Cfg9346, Cfg9346_Lock);
6898
6899 if (rtl_tbi_enabled(tp)) {
6900 tp->set_speed = rtl8169_set_speed_tbi;
6901 tp->get_settings = rtl8169_gset_tbi;
6902 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6903 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6904 tp->link_ok = rtl8169_tbi_link_ok;
6905 tp->do_ioctl = rtl_tbi_ioctl;
6906 } else {
6907 tp->set_speed = rtl8169_set_speed_xmii;
6908 tp->get_settings = rtl8169_gset_xmii;
6909 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6910 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6911 tp->link_ok = rtl8169_xmii_link_ok;
6912 tp->do_ioctl = rtl_xmii_ioctl;
6913 }
6914
6915 mutex_init(&tp->wk.mutex);
6916
6917 /* Get MAC address */
6918 for (i = 0; i < ETH_ALEN; i++)
6919 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6920
6921 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6922 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6923
6924 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6925
6926 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6927 * properly for all devices */
6928 dev->features |= NETIF_F_RXCSUM |
6929 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6930
6931 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6932 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6933 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6934 NETIF_F_HIGHDMA;
6935
6936 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6937 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6938 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6939
6940 dev->hw_features |= NETIF_F_RXALL;
6941 dev->hw_features |= NETIF_F_RXFCS;
6942
6943 tp->hw_start = cfg->hw_start;
6944 tp->event_slow = cfg->event_slow;
6945
6946 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6947 ~(RxBOVF | RxFOVF) : ~0;
6948
6949 init_timer(&tp->timer);
6950 tp->timer.data = (unsigned long) dev;
6951 tp->timer.function = rtl8169_phy_timer;
6952
6953 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6954
6955 rc = register_netdev(dev);
6956 if (rc < 0)
6957 goto err_out_msi_4;
6958
6959 pci_set_drvdata(pdev, dev);
6960
6961 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6962 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6963 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6964 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6965 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6966 "tx checksumming: %s]\n",
6967 rtl_chip_infos[chipset].jumbo_max,
6968 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6969 }
6970
6971 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6972 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6973 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6974 rtl8168_driver_start(tp);
6975 }
6976
6977 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6978
6979 if (pci_dev_run_wake(pdev))
6980 pm_runtime_put_noidle(&pdev->dev);
6981
6982 netif_carrier_off(dev);
6983
6984 out:
6985 return rc;
6986
6987 err_out_msi_4:
6988 netif_napi_del(&tp->napi);
6989 rtl_disable_msi(pdev, tp);
6990 iounmap(ioaddr);
6991 err_out_free_res_3:
6992 pci_release_regions(pdev);
6993 err_out_mwi_2:
6994 pci_clear_mwi(pdev);
6995 pci_disable_device(pdev);
6996 err_out_free_dev_1:
6997 free_netdev(dev);
6998 goto out;
6999 }
7000
/*
 * PCI driver descriptor: ties this driver's entry points into the PCI core.
 * Registration/unregistration is handled by module_pci_driver() at the end
 * of this file.
 */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,		/* "r8169"; shown by lspci -k, sysfs, etc. */
	.id_table	= rtl8169_pci_tbl,	/* PCI vendor/device IDs this driver claims */
	.probe		= rtl_init_one,		/* invoked once per matching device */
	.remove		= rtl_remove_one,	/* device removal or driver unload */
	.shutdown	= rtl_shutdown,		/* quiesce the NIC on system shutdown/reboot */
	.driver.pm	= RTL8169_PM_OPS,	/* suspend/resume and runtime PM callbacks */
};
7009
/*
 * Generate the boilerplate module_init()/module_exit() pair that simply
 * calls pci_register_driver()/pci_unregister_driver() on the descriptor.
 */
module_pci_driver(rtl8169_pci_driver);
This page took 0.184459 seconds and 5 git commands to generate.