r8169: remove rtl_ocpdr_cond.
[deliverable/linux.git] / drivers / net / ethernet / realtek / r8169.c
... / ...
CommitLineData
1/*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/pci.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/delay.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/if_vlan.h>
20#include <linux/crc32.h>
21#include <linux/in.h>
22#include <linux/ip.h>
23#include <linux/tcp.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/pm_runtime.h>
28#include <linux/firmware.h>
29#include <linux/pci-aspm.h>
30#include <linux/prefetch.h>
31
32#include <asm/io.h>
33#include <asm/irq.h>
34
35#define RTL8169_VERSION "2.3LK-NAPI"
36#define MODULENAME "r8169"
37#define PFX MODULENAME ": "
38
39#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
51
52#ifdef RTL8169_DEBUG
53#define assert(expr) \
54 if (!(expr)) { \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
57 }
58#define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
60#else
61#define assert(expr) do {} while (0)
62#define dprintk(fmt, args...) do {} while (0)
63#endif /* RTL8169_DEBUG */
64
65#define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
67
68#define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
70
71/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72#define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
74
75/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77static const int multicast_filter_limit = 32;
78
79#define MAX_READ_REQUEST_SHIFT 12
80#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83
84#define R8169_REGS_SIZE 256
85#define R8169_NAPI_WEIGHT 64
86#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
87#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
88#define RX_BUF_SIZE 1536 /* Rx Buffer size */
89#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
90#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
91
92#define RTL8169_TX_TIMEOUT (6*HZ)
93#define RTL8169_PHY_TIMEOUT (10*HZ)
94
95#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
96#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
97#define RTL_EEPROM_SIG_ADDR 0x0000
98
99/* write/read MMIO register */
100#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
101#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
102#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
103#define RTL_R8(reg) readb (ioaddr + (reg))
104#define RTL_R16(reg) readw (ioaddr + (reg))
105#define RTL_R32(reg) readl (ioaddr + (reg))
106
107enum mac_version {
108 RTL_GIGA_MAC_VER_01 = 0,
109 RTL_GIGA_MAC_VER_02,
110 RTL_GIGA_MAC_VER_03,
111 RTL_GIGA_MAC_VER_04,
112 RTL_GIGA_MAC_VER_05,
113 RTL_GIGA_MAC_VER_06,
114 RTL_GIGA_MAC_VER_07,
115 RTL_GIGA_MAC_VER_08,
116 RTL_GIGA_MAC_VER_09,
117 RTL_GIGA_MAC_VER_10,
118 RTL_GIGA_MAC_VER_11,
119 RTL_GIGA_MAC_VER_12,
120 RTL_GIGA_MAC_VER_13,
121 RTL_GIGA_MAC_VER_14,
122 RTL_GIGA_MAC_VER_15,
123 RTL_GIGA_MAC_VER_16,
124 RTL_GIGA_MAC_VER_17,
125 RTL_GIGA_MAC_VER_18,
126 RTL_GIGA_MAC_VER_19,
127 RTL_GIGA_MAC_VER_20,
128 RTL_GIGA_MAC_VER_21,
129 RTL_GIGA_MAC_VER_22,
130 RTL_GIGA_MAC_VER_23,
131 RTL_GIGA_MAC_VER_24,
132 RTL_GIGA_MAC_VER_25,
133 RTL_GIGA_MAC_VER_26,
134 RTL_GIGA_MAC_VER_27,
135 RTL_GIGA_MAC_VER_28,
136 RTL_GIGA_MAC_VER_29,
137 RTL_GIGA_MAC_VER_30,
138 RTL_GIGA_MAC_VER_31,
139 RTL_GIGA_MAC_VER_32,
140 RTL_GIGA_MAC_VER_33,
141 RTL_GIGA_MAC_VER_34,
142 RTL_GIGA_MAC_VER_35,
143 RTL_GIGA_MAC_VER_36,
144 RTL_GIGA_MAC_VER_37,
145 RTL_GIGA_MAC_VER_38,
146 RTL_GIGA_MAC_VER_39,
147 RTL_GIGA_MAC_VER_40,
148 RTL_GIGA_MAC_VER_41,
149 RTL_GIGA_MAC_NONE = 0xff,
150};
151
152enum rtl_tx_desc_version {
153 RTL_TD_0 = 0,
154 RTL_TD_1 = 1,
155};
156
157#define JUMBO_1K ETH_DATA_LEN
158#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
159#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
160#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
161#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
162
163#define _R(NAME,TD,FW,SZ,B) { \
164 .name = NAME, \
165 .txd_version = TD, \
166 .fw_name = FW, \
167 .jumbo_max = SZ, \
168 .jumbo_tx_csum = B \
169}
170
171static const struct {
172 const char *name;
173 enum rtl_tx_desc_version txd_version;
174 const char *fw_name;
175 u16 jumbo_max;
176 bool jumbo_tx_csum;
177} rtl_chip_infos[] = {
178 /* PCI devices. */
179 [RTL_GIGA_MAC_VER_01] =
180 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_02] =
182 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_03] =
184 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
185 [RTL_GIGA_MAC_VER_04] =
186 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
187 [RTL_GIGA_MAC_VER_05] =
188 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
189 [RTL_GIGA_MAC_VER_06] =
190 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
191 /* PCI-E devices. */
192 [RTL_GIGA_MAC_VER_07] =
193 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_08] =
195 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
196 [RTL_GIGA_MAC_VER_09] =
197 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
198 [RTL_GIGA_MAC_VER_10] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_11] =
201 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
202 [RTL_GIGA_MAC_VER_12] =
203 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
204 [RTL_GIGA_MAC_VER_13] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_14] =
207 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
208 [RTL_GIGA_MAC_VER_15] =
209 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
210 [RTL_GIGA_MAC_VER_16] =
211 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
212 [RTL_GIGA_MAC_VER_17] =
213 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
214 [RTL_GIGA_MAC_VER_18] =
215 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_19] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_20] =
219 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_21] =
221 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_22] =
223 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
224 [RTL_GIGA_MAC_VER_23] =
225 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
226 [RTL_GIGA_MAC_VER_24] =
227 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
228 [RTL_GIGA_MAC_VER_25] =
229 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
230 JUMBO_9K, false),
231 [RTL_GIGA_MAC_VER_26] =
232 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
233 JUMBO_9K, false),
234 [RTL_GIGA_MAC_VER_27] =
235 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
236 [RTL_GIGA_MAC_VER_28] =
237 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
238 [RTL_GIGA_MAC_VER_29] =
239 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
240 JUMBO_1K, true),
241 [RTL_GIGA_MAC_VER_30] =
242 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
243 JUMBO_1K, true),
244 [RTL_GIGA_MAC_VER_31] =
245 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
246 [RTL_GIGA_MAC_VER_32] =
247 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
248 JUMBO_9K, false),
249 [RTL_GIGA_MAC_VER_33] =
250 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_34] =
253 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
254 JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_35] =
256 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
257 JUMBO_9K, false),
258 [RTL_GIGA_MAC_VER_36] =
259 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
260 JUMBO_9K, false),
261 [RTL_GIGA_MAC_VER_37] =
262 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
263 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_38] =
265 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_39] =
268 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
269 JUMBO_1K, true),
270 [RTL_GIGA_MAC_VER_40] =
271 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
272 JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_41] =
274 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
275};
276#undef _R
277
278enum cfg_version {
279 RTL_CFG_0 = 0x00,
280 RTL_CFG_1,
281 RTL_CFG_2
282};
283
284static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
285 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
286 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
290 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
291 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
292 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
293 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
294 { PCI_VENDOR_ID_LINKSYS, 0x1032,
295 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
296 { 0x0001, 0x8168,
297 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
298 {0,},
299};
300
301MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
302
303static int rx_buf_sz = 16383;
304static int use_dac;
305static struct {
306 u32 msg_enable;
307} debug = { -1 };
308
309enum rtl_registers {
310 MAC0 = 0, /* Ethernet hardware address. */
311 MAC4 = 4,
312 MAR0 = 8, /* Multicast filter. */
313 CounterAddrLow = 0x10,
314 CounterAddrHigh = 0x14,
315 TxDescStartAddrLow = 0x20,
316 TxDescStartAddrHigh = 0x24,
317 TxHDescStartAddrLow = 0x28,
318 TxHDescStartAddrHigh = 0x2c,
319 FLASH = 0x30,
320 ERSR = 0x36,
321 ChipCmd = 0x37,
322 TxPoll = 0x38,
323 IntrMask = 0x3c,
324 IntrStatus = 0x3e,
325
326 TxConfig = 0x40,
327#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
328#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
329
330 RxConfig = 0x44,
331#define RX128_INT_EN (1 << 15) /* 8111c and later */
332#define RX_MULTI_EN (1 << 14) /* 8111c only */
333#define RXCFG_FIFO_SHIFT 13
334 /* No threshold before first PCI xfer */
335#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
336#define RXCFG_DMA_SHIFT 8
337 /* Unlimited maximum PCI burst. */
338#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
339
340 RxMissed = 0x4c,
341 Cfg9346 = 0x50,
342 Config0 = 0x51,
343 Config1 = 0x52,
344 Config2 = 0x53,
345#define PME_SIGNAL (1 << 5) /* 8168c and later */
346
347 Config3 = 0x54,
348 Config4 = 0x55,
349 Config5 = 0x56,
350 MultiIntr = 0x5c,
351 PHYAR = 0x60,
352 PHYstatus = 0x6c,
353 RxMaxSize = 0xda,
354 CPlusCmd = 0xe0,
355 IntrMitigate = 0xe2,
356 RxDescAddrLow = 0xe4,
357 RxDescAddrHigh = 0xe8,
358 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
359
360#define NoEarlyTx 0x3f /* Max value : no early transmit. */
361
362 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
363
364#define TxPacketMax (8064 >> 7)
365#define EarlySize 0x27
366
367 FuncEvent = 0xf0,
368 FuncEventMask = 0xf4,
369 FuncPresetState = 0xf8,
370 FuncForceEvent = 0xfc,
371};
372
373enum rtl8110_registers {
374 TBICSR = 0x64,
375 TBI_ANAR = 0x68,
376 TBI_LPAR = 0x6a,
377};
378
379enum rtl8168_8101_registers {
380 CSIDR = 0x64,
381 CSIAR = 0x68,
382#define CSIAR_FLAG 0x80000000
383#define CSIAR_WRITE_CMD 0x80000000
384#define CSIAR_BYTE_ENABLE 0x0f
385#define CSIAR_BYTE_ENABLE_SHIFT 12
386#define CSIAR_ADDR_MASK 0x0fff
387#define CSIAR_FUNC_CARD 0x00000000
388#define CSIAR_FUNC_SDIO 0x00010000
389#define CSIAR_FUNC_NIC 0x00020000
390 PMCH = 0x6f,
391 EPHYAR = 0x80,
392#define EPHYAR_FLAG 0x80000000
393#define EPHYAR_WRITE_CMD 0x80000000
394#define EPHYAR_REG_MASK 0x1f
395#define EPHYAR_REG_SHIFT 16
396#define EPHYAR_DATA_MASK 0xffff
397 DLLPR = 0xd0,
398#define PFM_EN (1 << 6)
399 DBG_REG = 0xd1,
400#define FIX_NAK_1 (1 << 4)
401#define FIX_NAK_2 (1 << 3)
402 TWSI = 0xd2,
403 MCU = 0xd3,
404#define NOW_IS_OOB (1 << 7)
405#define TX_EMPTY (1 << 5)
406#define RX_EMPTY (1 << 4)
407#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
408#define EN_NDP (1 << 3)
409#define EN_OOB_RESET (1 << 2)
410#define LINK_LIST_RDY (1 << 1)
411 EFUSEAR = 0xdc,
412#define EFUSEAR_FLAG 0x80000000
413#define EFUSEAR_WRITE_CMD 0x80000000
414#define EFUSEAR_READ_CMD 0x00000000
415#define EFUSEAR_REG_MASK 0x03ff
416#define EFUSEAR_REG_SHIFT 8
417#define EFUSEAR_DATA_MASK 0xff
418};
419
420enum rtl8168_registers {
421 LED_FREQ = 0x1a,
422 EEE_LED = 0x1b,
423 ERIDR = 0x70,
424 ERIAR = 0x74,
425#define ERIAR_FLAG 0x80000000
426#define ERIAR_WRITE_CMD 0x80000000
427#define ERIAR_READ_CMD 0x00000000
428#define ERIAR_ADDR_BYTE_ALIGN 4
429#define ERIAR_TYPE_SHIFT 16
430#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
431#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
432#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
433#define ERIAR_MASK_SHIFT 12
434#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
435#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
436#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
437#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
438 EPHY_RXER_NUM = 0x7c,
439 OCPDR = 0xb0, /* OCP GPHY access */
440#define OCPDR_WRITE_CMD 0x80000000
441#define OCPDR_READ_CMD 0x00000000
442#define OCPDR_REG_MASK 0x7f
443#define OCPDR_GPHY_REG_SHIFT 16
444#define OCPDR_DATA_MASK 0xffff
445 OCPAR = 0xb4,
446#define OCPAR_FLAG 0x80000000
447#define OCPAR_GPHY_WRITE_CMD 0x8000f060
448#define OCPAR_GPHY_READ_CMD 0x0000f060
449 GPHY_OCP = 0xb8,
450 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
451 MISC = 0xf0, /* 8168e only. */
452#define TXPLA_RST (1 << 29)
453#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
454#define PWM_EN (1 << 22)
455#define RXDV_GATED_EN (1 << 19)
456#define EARLY_TALLY_EN (1 << 16)
457};
458
459enum rtl_register_content {
460 /* InterruptStatusBits */
461 SYSErr = 0x8000,
462 PCSTimeout = 0x4000,
463 SWInt = 0x0100,
464 TxDescUnavail = 0x0080,
465 RxFIFOOver = 0x0040,
466 LinkChg = 0x0020,
467 RxOverflow = 0x0010,
468 TxErr = 0x0008,
469 TxOK = 0x0004,
470 RxErr = 0x0002,
471 RxOK = 0x0001,
472
473 /* RxStatusDesc */
474 RxBOVF = (1 << 24),
475 RxFOVF = (1 << 23),
476 RxRWT = (1 << 22),
477 RxRES = (1 << 21),
478 RxRUNT = (1 << 20),
479 RxCRC = (1 << 19),
480
481 /* ChipCmdBits */
482 StopReq = 0x80,
483 CmdReset = 0x10,
484 CmdRxEnb = 0x08,
485 CmdTxEnb = 0x04,
486 RxBufEmpty = 0x01,
487
488 /* TXPoll register p.5 */
489 HPQ = 0x80, /* Poll cmd on the high prio queue */
490 NPQ = 0x40, /* Poll cmd on the low prio queue */
491 FSWInt = 0x01, /* Forced software interrupt */
492
493 /* Cfg9346Bits */
494 Cfg9346_Lock = 0x00,
495 Cfg9346_Unlock = 0xc0,
496
497 /* rx_mode_bits */
498 AcceptErr = 0x20,
499 AcceptRunt = 0x10,
500 AcceptBroadcast = 0x08,
501 AcceptMulticast = 0x04,
502 AcceptMyPhys = 0x02,
503 AcceptAllPhys = 0x01,
504#define RX_CONFIG_ACCEPT_MASK 0x3f
505
506 /* TxConfigBits */
507 TxInterFrameGapShift = 24,
508 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
509
510 /* Config1 register p.24 */
511 LEDS1 = (1 << 7),
512 LEDS0 = (1 << 6),
513 Speed_down = (1 << 4),
514 MEMMAP = (1 << 3),
515 IOMAP = (1 << 2),
516 VPD = (1 << 1),
517 PMEnable = (1 << 0), /* Power Management Enable */
518
519 /* Config2 register p. 25 */
520 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
521 PCI_Clock_66MHz = 0x01,
522 PCI_Clock_33MHz = 0x00,
523
524 /* Config3 register p.25 */
525 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
526 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
527 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
528 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
529
530 /* Config4 register */
531 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
532
533 /* Config5 register p.27 */
534 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
535 MWF = (1 << 5), /* Accept Multicast wakeup frame */
536 UWF = (1 << 4), /* Accept Unicast wakeup frame */
537 Spi_en = (1 << 3),
538 LanWake = (1 << 1), /* LanWake enable/disable */
539 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
540
541 /* TBICSR p.28 */
542 TBIReset = 0x80000000,
543 TBILoopback = 0x40000000,
544 TBINwEnable = 0x20000000,
545 TBINwRestart = 0x10000000,
546 TBILinkOk = 0x02000000,
547 TBINwComplete = 0x01000000,
548
549 /* CPlusCmd p.31 */
550 EnableBist = (1 << 15), // 8168 8101
551 Mac_dbgo_oe = (1 << 14), // 8168 8101
552 Normal_mode = (1 << 13), // unused
553 Force_half_dup = (1 << 12), // 8168 8101
554 Force_rxflow_en = (1 << 11), // 8168 8101
555 Force_txflow_en = (1 << 10), // 8168 8101
556 Cxpl_dbg_sel = (1 << 9), // 8168 8101
557 ASF = (1 << 8), // 8168 8101
558 PktCntrDisable = (1 << 7), // 8168 8101
559 Mac_dbgo_sel = 0x001c, // 8168
560 RxVlan = (1 << 6),
561 RxChkSum = (1 << 5),
562 PCIDAC = (1 << 4),
563 PCIMulRW = (1 << 3),
564 INTT_0 = 0x0000, // 8168
565 INTT_1 = 0x0001, // 8168
566 INTT_2 = 0x0002, // 8168
567 INTT_3 = 0x0003, // 8168
568
569 /* rtl8169_PHYstatus */
570 TBI_Enable = 0x80,
571 TxFlowCtrl = 0x40,
572 RxFlowCtrl = 0x20,
573 _1000bpsF = 0x10,
574 _100bps = 0x08,
575 _10bps = 0x04,
576 LinkStatus = 0x02,
577 FullDup = 0x01,
578
579 /* _TBICSRBit */
580 TBILinkOK = 0x02000000,
581
582 /* DumpCounterCommand */
583 CounterDump = 0x8,
584};
585
586enum rtl_desc_bit {
587 /* First doubleword. */
588 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
589 RingEnd = (1 << 30), /* End of descriptor ring */
590 FirstFrag = (1 << 29), /* First segment of a packet */
591 LastFrag = (1 << 28), /* Final segment of a packet */
592};
593
594/* Generic case. */
595enum rtl_tx_desc_bit {
596 /* First doubleword. */
597 TD_LSO = (1 << 27), /* Large Send Offload */
598#define TD_MSS_MAX 0x07ffu /* MSS value */
599
600 /* Second doubleword. */
601 TxVlanTag = (1 << 17), /* Add VLAN tag */
602};
603
604/* 8169, 8168b and 810x except 8102e. */
605enum rtl_tx_desc_bit_0 {
606 /* First doubleword. */
607#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
608 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
609 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
610 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
611};
612
613/* 8102e, 8168c and beyond. */
614enum rtl_tx_desc_bit_1 {
615 /* Second doubleword. */
616#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
617 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
618 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
619 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
620};
621
/*
 * Per-format Tx descriptor parameters, indexed by enum rtl_tx_desc_version:
 * the opts bits that request UDP/TCP checksum offload, the position of the
 * TSO MSS field, and opts_offset selecting which opts word carries them
 * (see rtl_tx_desc_bit_0 / rtl_tx_desc_bit_1 above).
 */
static const struct rtl_tx_desc_info {
	struct {
		u32 udp;	/* opts bits for UDP/IP checksum offload */
		u32 tcp;	/* opts bits for TCP/IP checksum offload */
	} checksum;
	u16 mss_shift;		/* bit position of the MSS field */
	u16 opts_offset;	/* which opts word holds csum/MSS (0 or 1) */
} tx_desc_info [] = {
	[RTL_TD_0] = {
		.checksum = {
			.udp = TD0_IP_CS | TD0_UDP_CS,
			.tcp = TD0_IP_CS | TD0_TCP_CS
		},
		.mss_shift = TD0_MSS_SHIFT,
		.opts_offset = 0
	},
	[RTL_TD_1] = {
		.checksum = {
			.udp = TD1_IP_CS | TD1_UDP_CS,
			.tcp = TD1_IP_CS | TD1_TCP_CS
		},
		.mss_shift = TD1_MSS_SHIFT,
		.opts_offset = 1
	}
};
647
648enum rtl_rx_desc_bit {
649 /* Rx private */
650 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
651 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
652
653#define RxProtoUDP (PID1)
654#define RxProtoTCP (PID0)
655#define RxProtoIP (PID1 | PID0)
656#define RxProtoMask RxProtoIP
657
658 IPFail = (1 << 16), /* IP checksum failed */
659 UDPFail = (1 << 15), /* UDP/IP checksum failed */
660 TCPFail = (1 << 14), /* TCP/IP checksum failed */
661 RxVlanTag = (1 << 16), /* VLAN tag available */
662};
663
664#define RsvdMask 0x3fffc000
665
/* Hardware Tx descriptor: command/offload bits (opts1/opts2, see the
 * rtl_tx_desc_bit* enums) and the DMA address of the buffer. */
struct TxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};
671
/* Hardware Rx descriptor: status/protocol bits (see enum rtl_rx_desc_bit)
 * and the DMA address of the receive buffer. */
struct RxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};
677
/* Per-Tx-slot bookkeeping (tp->tx_skb[]): the queued skb and its length. */
struct ring_info {
	struct sk_buff *skb;
	u32 len;
	u8 __pad[sizeof(void *) - sizeof(u32)];	/* pad to a pointer-size multiple */
};
683
/* Per-device feature flags kept in tp->features. */
enum features {
	RTL_FEATURE_WOL = (1 << 0),	/* Wake-on-LAN */
	RTL_FEATURE_MSI = (1 << 1),	/* MSI interrupts */
	RTL_FEATURE_GMII = (1 << 2),	/* GMII (gigabit) PHY interface */
};
689
/*
 * Hardware statistics block.  Field order and widths match the chip's
 * counter dump layout (presumably fetched via CounterAddrLow/High and
 * CounterDump — confirm against the stats dump code); do not reorder.
 * The "tx_underun" spelling is historical and kept as-is.
 */
struct rtl8169_counters {
	__le64 tx_packets;
	__le64 rx_packets;
	__le64 tx_errors;
	__le32 rx_errors;
	__le16 rx_missed;
	__le16 align_errors;
	__le32 tx_one_collision;
	__le32 tx_multi_collision;
	__le64 rx_unicast;
	__le64 rx_broadcast;
	__le32 rx_multicast;
	__le16 tx_aborted;
	__le16 tx_underun;
};
705
/* Bit numbers for the tp->wk.flags bitmap (deferred-work state). */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,
	RTL_FLAG_TASK_SLOW_PENDING,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_TASK_PHY_PENDING,
	RTL_FLAG_MAX	/* bitmap size, not a real flag */
};
713
/* Packet/byte counters protected by a u64_stats_sync (one each for Rx/Tx). */
struct rtl8169_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
719
720struct rtl8169_private {
721 void __iomem *mmio_addr; /* memory map physical address */
722 struct pci_dev *pci_dev;
723 struct net_device *dev;
724 struct napi_struct napi;
725 u32 msg_enable;
726 u16 txd_version;
727 u16 mac_version;
728 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
730 u32 dirty_rx;
731 u32 dirty_tx;
732 struct rtl8169_stats rx_stats;
733 struct rtl8169_stats tx_stats;
734 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
735 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
736 dma_addr_t TxPhyAddr;
737 dma_addr_t RxPhyAddr;
738 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
739 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
740 struct timer_list timer;
741 u16 cp_cmd;
742
743 u16 event_slow;
744
745 struct mdio_ops {
746 void (*write)(struct rtl8169_private *, int, int);
747 int (*read)(struct rtl8169_private *, int);
748 } mdio_ops;
749
750 struct pll_power_ops {
751 void (*down)(struct rtl8169_private *);
752 void (*up)(struct rtl8169_private *);
753 } pll_power_ops;
754
755 struct jumbo_ops {
756 void (*enable)(struct rtl8169_private *);
757 void (*disable)(struct rtl8169_private *);
758 } jumbo_ops;
759
760 struct csi_ops {
761 void (*write)(struct rtl8169_private *, int, int);
762 u32 (*read)(struct rtl8169_private *, int);
763 } csi_ops;
764
765 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
766 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
767 void (*phy_reset_enable)(struct rtl8169_private *tp);
768 void (*hw_start)(struct net_device *);
769 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
770 unsigned int (*link_ok)(void __iomem *);
771 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
772
773 struct {
774 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
775 struct mutex mutex;
776 struct work_struct work;
777 } wk;
778
779 unsigned features;
780
781 struct mii_if_info mii;
782 struct rtl8169_counters counters;
783 u32 saved_wolopts;
784 u32 opts1_mask;
785
786 struct rtl_fw {
787 const struct firmware *fw;
788
789#define RTL_VER_SIZE 32
790
791 char version[RTL_VER_SIZE];
792
793 struct rtl_fw_phy_action {
794 __le32 *code;
795 size_t size;
796 } phy_action;
797 } *rtl_fw;
798#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
799
800 u32 ocp_base;
801};
802
803MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
804MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
805module_param(use_dac, int, 0);
806MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
807module_param_named(debug, debug.msg_enable, int, 0);
808MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
809MODULE_LICENSE("GPL");
810MODULE_VERSION(RTL8169_VERSION);
811MODULE_FIRMWARE(FIRMWARE_8168D_1);
812MODULE_FIRMWARE(FIRMWARE_8168D_2);
813MODULE_FIRMWARE(FIRMWARE_8168E_1);
814MODULE_FIRMWARE(FIRMWARE_8168E_2);
815MODULE_FIRMWARE(FIRMWARE_8168E_3);
816MODULE_FIRMWARE(FIRMWARE_8105E_1);
817MODULE_FIRMWARE(FIRMWARE_8168F_1);
818MODULE_FIRMWARE(FIRMWARE_8168F_2);
819MODULE_FIRMWARE(FIRMWARE_8402_1);
820MODULE_FIRMWARE(FIRMWARE_8411_1);
821MODULE_FIRMWARE(FIRMWARE_8106E_1);
822MODULE_FIRMWARE(FIRMWARE_8168G_1);
823
/* Serialize access to the deferred-work state in tp->wk. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
828
/* Release the deferred-work mutex taken by rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
833
834static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
835{
836 int cap = pci_pcie_cap(pdev);
837
838 if (cap) {
839 u16 ctl;
840
841 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
842 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
843 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
844 }
845}
846
/*
 * A named hardware condition polled by rtl_loop_wait(): "check" samples
 * the condition, "msg" identifies it in the timeout error message.
 */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
851
/*
 * Wrapper so udelay() (possibly a macro) can be passed as the delay
 * callback of rtl_loop_wait().
 */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
856
857static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
858 void (*delay)(unsigned int), unsigned int d, int n,
859 bool high)
860{
861 int i;
862
863 for (i = 0; i < n; i++) {
864 delay(d);
865 if (c->check(tp) == high)
866 return true;
867 }
868 netif_err(tp, drv, tp->dev, c->msg);
869 return false;
870}
871
/* Busy-wait (udelay, d us per step, n steps) until condition c is true. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
878
/* Busy-wait (udelay, d us per step, n steps) until condition c is false. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
885
/* Sleep-wait (msleep, d ms per step, n steps) until condition c is true. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
892
/* Sleep-wait (msleep, d ms per step, n steps) until condition c is false. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
899
/*
 * Declare a const struct rtl_cond called "name" whose check callback is
 * name##_check and whose message is the stringified name, then open the
 * definition of that callback — the caller supplies the function body
 * immediately after the macro invocation.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check = name ## _check,			\
	.msg = #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
909
/* OCPAR flag bit: polled high after a read command, low after a write. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
916
/*
 * Read a dword from the OCP address space (used for the OOB/DASH
 * firmware interface).  "mask" is the byte-enable nibble placed in
 * OCPAR bits 15..12, "reg" the 12-bit OCP address.
 * Returns ~0 if the transfer does not complete within 20 * 100us.
 */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(OCPDR) : ~0;
}
926
/*
 * Write a dword to the OCP address space.  Data goes to OCPDR first;
 * setting OCPAR_FLAG in OCPAR issues the write command, and the flag
 * clears when the transfer completes.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
936
/* ERIAR flag bit: set while an ERI register transfer is in flight. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
943
/*
 * Notify the out-of-band (OOB/DASH) management firmware of a driver
 * event; "cmd" is one of the OOB_CMD_* values.  The command byte is
 * pushed through the ERI interface (0x800010e8 decodes per the ERIAR
 * bit layout as: write, one byte enabled, address 0x0e8), then the
 * firmware doorbell is rung via OCP register 0x30.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	/* If the ERI write never completes, skip the doorbell. */
	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
957
958#define OOB_CMD_RESET 0x00
959#define OOB_CMD_DRIVER_START 0x05
960#define OOB_CMD_DRIVER_STOP 0x06
961
962static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
963{
964 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
965}
966
967DECLARE_RTL_COND(rtl_ocp_read_cond)
968{
969 u16 reg;
970
971 reg = rtl8168_get_ocp_reg(tp);
972
973 return ocp_read(tp, 0x0f, reg) & 0x00000800;
974}
975
/* Tell the OOB firmware the driver is starting; wait (up to 10 * 10ms)
 * for its status bit to go high. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
982
/* Tell the OOB firmware the driver is stopping; wait (up to 10 * 10ms)
 * for its status bit to go low. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
989
990static int r8168dp_check_dash(struct rtl8169_private *tp)
991{
992 u16 reg = rtl8168_get_ocp_reg(tp);
993
994 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
995}
996
997static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
998{
999 if (reg & 0xffff0001) {
1000 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
1001 return true;
1002 }
1003 return false;
1004}
1005
/* GPHY_OCP completion flag (OCPAR_FLAG bit reused): cleared when a PHY
 * OCP write finishes, set when read data becomes available. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1012
/*
 * Write "data" to PHY OCP register "reg".  reg must be even and fit in
 * 16 bits (enforced by rtl_ocp_reg_failure); reg << 15 places reg/2 in
 * the upper half-word of GPHY_OCP next to the 16-bit data.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1024
/*
 * Read PHY OCP register "reg" (same addressing as r8168_phy_ocp_write).
 * Returns the 16-bit register value, or ~0 (0xffff after truncation to
 * u16) on timeout or on an invalid register address.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1037
/* Read-modify-write a PHY OCP register: set the bits in "p", then clear
 * the bits in "m". */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	int cur = r8168_phy_ocp_read(tp, reg);

	r8168_phy_ocp_write(tp, reg, (cur | p) & ~m);
}
1045
/*
 * Write a MAC OCP register through OCPDR (same reg encoding as the PHY
 * variant).  Note: no completion poll here, unlike r8168_phy_ocp_write.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1055
/*
 * Read a MAC OCP register through OCPDR.  Returns 0 on an invalid
 * register address; no completion poll (matches the write path).
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1067
1068#define OCP_STD_PHY_BASE 0xa400
1069
/*
 * MDIO write for 8168g-class chips: PHY registers live in the PHY OCP
 * address space at tp->ocp_base.  A write to reg 0x1f (the MII page
 * select) only changes ocp_base — value 0 selects the standard page at
 * OCP_STD_PHY_BASE; nothing is sent to the hardware for it.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	/* Non-standard pages address their registers starting at 0x10. */
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
1082
1083static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1084{
1085 if (tp->ocp_base != OCP_STD_PHY_BASE)
1086 reg -= 0x10;
1087
1088 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1089}
1090
/* PHYAR bit 31: cleared when a PHY write completes, set when read data
 * is ready. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1097
/* Write a 5-bit GMII register through the PHYAR indirect interface
 * (bit 31 = write command, reg in bits 20..16, value in bits 15..0). */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1111
/* Classic 8169 MDIO read through PHYAR: issue the read command (bit 31
 * clear), poll the busy flag high, then fetch the 16-bit value. Returns
 * ~0 on timeout.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1130
/* 8168DP (v1) MDIO access: stage the command word in OCPDR, then trigger
 * the GPHY write via OCPAR and wait for the controller to go idle. The
 * EPHY_RXER_NUM write is part of the documented access sequence.
 */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1141
1142static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1143{
1144 r8168dp_1_mdio_access(tp, reg,
1145 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1146}
1147
/* MDIO read on 8168DP (v1): issue the read command, wait 1ms, then start
 * the GPHY read phase and poll for completion. Returns ~0 on timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1161
1162#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1163
1164static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1165{
1166 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1167}
1168
1169static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1170{
1171 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1172}
1173
/* MDIO write on 8168DP (v2): the plain 8169 access must be bracketed by
 * start/stop to temporarily enable host MDIO ownership.
 */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1184
/* MDIO read on 8168DP (v2): same start/stop bracketing as the write path. */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1198
/* Write a PHY register through the per-chip mdio_ops dispatch table. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1203
/* Read a PHY register through the per-chip mdio_ops dispatch table. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1208
/* OR @value into a PHY register (read-modify-write, set bits only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1213
/* Read-modify-write a PHY register: set the bits in @p, clear those in @m. */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	rtl_writephy(tp, reg_addr, (rtl_readphy(tp, reg_addr) | p) & ~m);
}
1221
/* mii_if_info write hook; @phy_id is ignored (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1229
/* mii_if_info read hook; @phy_id is ignored (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1236
/* Poll condition: EPHYAR busy/complete flag. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1243
/* Write an EPHY (PCIe PHY) register, then wait for the busy flag to clear
 * and add the mandated 10us settle delay before the next command.
 */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1255
/* Read an EPHY (PCIe PHY) register; returns ~0 (0xffff as u16) on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1265
/* Write @val to ERI register @addr. @mask selects the byte lanes written
 * (ERIAR_MASK_*), @type the target unit (e.g. ERIAR_EXGMAC). The data
 * register must be loaded before the command is issued.
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* ERI addresses are dword-aligned and at least one lane must be set. */
	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1277
/* Read a full 32-bit ERI register (all four byte lanes); ~0 on timeout. */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1287
1288static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1289 u32 m, int type)
1290{
1291 u32 val;
1292
1293 val = rtl_eri_read(tp, addr, type);
1294 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1295}
1296
/* One entry of a batched EXGMAC (ERI) register write sequence. */
struct exgmac_reg {
	u16 addr;	/* ERI register address */
	u16 mask;	/* byte-lane mask (ERIAR_MASK_*) */
	u32 val;	/* value to write */
};
1302
1303static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1304 const struct exgmac_reg *r, int len)
1305{
1306 while (len-- > 0) {
1307 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1308 r++;
1309 }
1310}
1311
/* Poll condition: EFUSE access complete flag. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1318
/* Read one byte from the 8168D efuse; returns ~0 (0xff as u8) on timeout. */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1328
/* Return the current interrupt status bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1335
/* Acknowledge (clear) the interrupt status bits in @bits. */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	/* Order the MMIO write before any following lock release. */
	mmiowb();
}
1343
/* Mask all chip interrupts. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1351
/* Unmask the interrupt sources given in @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1358
1359#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1360#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1361#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1362
/* Unmask the NAPI rx/tx events plus the chip's slow-path event set. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1367
/* Mask all interrupts and acknowledge anything pending. The final ChipCmd
 * read flushes the posted MMIO writes.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1376
/* Nonzero while a TBI reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1383
/* Nonzero while the PHY reset bit is still self-clearing. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1388
/* Nonzero when the TBI (fibre) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1393
/* Nonzero when the copper (XMII) link is up. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1398
1399static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1400{
1401 void __iomem *ioaddr = tp->mmio_addr;
1402
1403 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1404}
1405
1406static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1407{
1408 unsigned int val;
1409
1410 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1411 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1412}
1413
/* Apply chip-specific ERI register fixups when the link state changes.
 * The written values are per-speed magic supplied for each MAC family;
 * their exact hardware meaning is undocumented here — presumably FIFO /
 * flow-related thresholds (vendor-provided; verify against datasheets).
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Program 0x1bc/0x1dc per negotiated speed (1000F/100/other). */
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		/* Same registers, but only 1000F vs. everything else. */
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		/* 8402: special-case 10Mbps links only. */
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1470
/* Propagate the hardware link state to the net stack. With @pm set, a
 * link-up cancels any scheduled runtime suspend and a link-down schedules
 * one after 5s of no carrier.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		/* Per-chip register fixups must run before carrier-on. */
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1490
/* Link-status check without runtime-PM interaction. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1497
1498#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1499
1500static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1501{
1502 void __iomem *ioaddr = tp->mmio_addr;
1503 u8 options;
1504 u32 wolopts = 0;
1505
1506 options = RTL_R8(Config1);
1507 if (!(options & PMEnable))
1508 return 0;
1509
1510 options = RTL_R8(Config3);
1511 if (options & LinkUp)
1512 wolopts |= WAKE_PHY;
1513 if (options & MagicPacket)
1514 wolopts |= WAKE_MAGIC;
1515
1516 options = RTL_R8(Config5);
1517 if (options & UWF)
1518 wolopts |= WAKE_UCAST;
1519 if (options & BWF)
1520 wolopts |= WAKE_BCAST;
1521 if (options & MWF)
1522 wolopts |= WAKE_MCAST;
1523
1524 return wolopts;
1525}
1526
/* ethtool get_wol hook: report supported and currently enabled wake flags. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1538
/* Program the chip's wake-on-LAN configuration. Config registers are only
 * writable between the Cfg9346 unlock/lock pair. PME enabling lives in
 * Config1 on old chips (<= VER_17) and in Config2 on newer ones.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	/* Map each WAKE_* flag to its config register and bit. */
	static const struct {
		u32 opt;
		u16 reg;
		u8 mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1583
/* ethtool set_wol hook: record the WoL feature flag, program the chip and
 * tell the PM core whether this device may wake the system.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
1602
/* Firmware blob name for the detected chip, or NULL if none is needed. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1607
/* ethtool get_drvinfo hook: driver name/version, PCI slot and, when a
 * firmware blob was loaded successfully, its version string.
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* tp->rtl_fw may hold an ERR_PTR when firmware loading failed. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1622
/* ethtool get_regs_len hook: size of the register dump. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1627
1628static int rtl8169_set_speed_tbi(struct net_device *dev,
1629 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1630{
1631 struct rtl8169_private *tp = netdev_priv(dev);
1632 void __iomem *ioaddr = tp->mmio_addr;
1633 int ret = 0;
1634 u32 reg;
1635
1636 reg = RTL_R32(TBICSR);
1637 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1638 (duplex == DUPLEX_FULL)) {
1639 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1640 } else if (autoneg == AUTONEG_ENABLE)
1641 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1642 else {
1643 netif_warn(tp, link, dev,
1644 "incorrect speed setting refused in TBI mode\n");
1645 ret = -EOPNOTSUPP;
1646 }
1647
1648 return ret;
1649}
1650
/* Configure link settings in copper (XMII) mode. Returns 0 on success,
 * -EINVAL for an unsupported speed/advertisement combination.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard MII registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		/* Build the 10/100 advertisement from the requested modes. */
		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* Always advertise symmetric + asymmetric pause. */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		/* Forced mode: only 10 or 100 Mbps can be forced. */
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* Vendor fixup for forced-100 on the oldest gigabit chips;
	 * register 0x17/0x0e values are undocumented magic. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1729
1730static int rtl8169_set_speed(struct net_device *dev,
1731 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1732{
1733 struct rtl8169_private *tp = netdev_priv(dev);
1734 int ret;
1735
1736 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1737 if (ret < 0)
1738 goto out;
1739
1740 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1741 (advertising & ADVERTISED_1000baseT_Full)) {
1742 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1743 }
1744out:
1745 return ret;
1746}
1747
/* ethtool set_settings hook: stop the PHY timer, then apply the requested
 * link configuration under the work lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1762
1763static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1764 netdev_features_t features)
1765{
1766 struct rtl8169_private *tp = netdev_priv(dev);
1767
1768 if (dev->mtu > TD_MSS_MAX)
1769 features &= ~NETIF_F_ALL_TSO;
1770
1771 if (dev->mtu > JUMBO_1K &&
1772 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1773 features &= ~NETIF_F_IP_CSUM;
1774
1775 return features;
1776}
1777
1778static void __rtl8169_set_features(struct net_device *dev,
1779 netdev_features_t features)
1780{
1781 struct rtl8169_private *tp = netdev_priv(dev);
1782 netdev_features_t changed = features ^ dev->features;
1783 void __iomem *ioaddr = tp->mmio_addr;
1784
1785 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1786 return;
1787
1788 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1789 if (features & NETIF_F_RXCSUM)
1790 tp->cp_cmd |= RxChkSum;
1791 else
1792 tp->cp_cmd &= ~RxChkSum;
1793
1794 if (dev->features & NETIF_F_HW_VLAN_RX)
1795 tp->cp_cmd |= RxVlan;
1796 else
1797 tp->cp_cmd &= ~RxVlan;
1798
1799 RTL_W16(CPlusCmd, tp->cp_cmd);
1800 RTL_R16(CPlusCmd);
1801 }
1802 if (changed & NETIF_F_RXALL) {
1803 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1804 if (features & NETIF_F_RXALL)
1805 tmp |= (AcceptErr | AcceptRunt);
1806 RTL_W32(RxConfig, tmp);
1807 }
1808}
1809
/* ndo_set_features hook: apply the feature change under the work lock. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
1821
1822
1823static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1824 struct sk_buff *skb)
1825{
1826 return (vlan_tx_tag_present(skb)) ?
1827 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1828}
1829
/* Extract a hardware-stripped VLAN tag from an rx descriptor into @skb,
 * then clear opts2 to hand the descriptor back in a clean state. The tag
 * arrives byte-swapped from the chip.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}
1839
/* ethtool get_settings for TBI (fibre) mode: always 1000/full; autoneg
 * state mirrors the TBINwEnable bit.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1860
/* ethtool get_settings for copper mode: delegate to the generic MII layer. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1867
/* ethtool get_settings hook: dispatch to the TBI/XMII variant under lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1879
/* ethtool get_regs hook: dump (at most R8169_REGS_SIZE bytes of) the MMIO
 * register window into @p.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
1892
/* ethtool get_msglevel hook. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1899
/* ethtool set_msglevel hook. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1906
/* ethtool statistics names; order must match rtl8169_get_ethtool_stats(). */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1922
1923static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1924{
1925 switch (sset) {
1926 case ETH_SS_STATS:
1927 return ARRAY_SIZE(rtl8169_gstrings);
1928 default:
1929 return -EOPNOTSUPP;
1930 }
1931}
1932
/* Poll condition: tally counter dump still in progress. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & CounterDump;
}
1939
/* Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and, on success, snapshot them into tp->counters.
 * Sequence: program high/low DMA address, set CounterDump, poll it low.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* On timeout the stale tp->counters snapshot is kept. */
	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
		memcpy(&tp->counters, counters, sizeof(*counters));

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1973
/* ethtool get_ethtool_stats hook: refresh the hardware tally counters and
 * expand them into @data in rtl8169_gstrings order.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1997
1998static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1999{
2000 switch(stringset) {
2001 case ETH_SS_STATS:
2002 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
2003 break;
2004 }
2005}
2006
/* ethtool operations table registered for all chip variants. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2023
/* Identify the chip variant by matching TxConfig against a first-match-wins
 * mask/value table, falling back to @default_version for unknown silicon.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168G family. */
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	/* The catch-all entry guarantees this scan terminates. */
	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
2135
/* Debug helper: log the detected mac_version (RTL8169_DEBUG builds only). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2140
/* One entry of a batched PHY register write sequence. */
struct phy_reg {
	u16 reg;	/* PHY register number */
	u16 val;	/* value to write */
};
2145
2146static void rtl_writephy_batch(struct rtl8169_private *tp,
2147 const struct phy_reg *regs, int len)
2148{
2149 while (len-- > 0) {
2150 rtl_writephy(tp, regs->reg, regs->val);
2151 regs++;
2152 }
2153}
2154
/* Opcodes of the firmware "PHY action" bytecode: the upper nibble of each
 * 32-bit instruction word selects the operation (see rtl_fw_data_ok()).
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_READ_EFUSE		0x40000000
#define PHY_READ_MAC_BYTE	0x50000000
#define PHY_WRITE_MAC_BYTE	0x60000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
#define PHY_WRITE_ERI_WORD	0xf0000000
2171
/* On-disk header of the "new" firmware image format (magic == 0). */
struct fw_info {
	u32	magic;			/* 0 selects this header format */
	char	version[RTL_VER_SIZE];	/* human-readable version string */
	__le32	fw_start;		/* byte offset of the opcode stream */
	__le32	fw_len;			/* opcode count */
	u8	chksum;			/* whole-file additive checksum byte */
} __packed;
2179
2180#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2181
/* Validate a loaded firmware image and locate its PHY opcode stream.
 * Two formats exist: a headered one (magic == 0) carrying a version string,
 * stream offset/length and an additive checksum, and a raw one that is just
 * a bare opcode stream. Fills rtl_fw->phy_action and ->version; returns
 * false if the image is malformed.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* All file bytes must sum to zero (mod 256). */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* Opcode stream must fit between @start and end of file. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* Raw format: file is a whole number of opcodes; use the
		 * firmware file name as the version string. */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2232
2233static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2234 struct rtl_fw_phy_action *pa)
2235{
2236 bool rc = false;
2237 size_t index;
2238
2239 for (index = 0; index < pa->size; index++) {
2240 u32 action = le32_to_cpu(pa->code[index]);
2241 u32 regno = (action & 0x0fff0000) >> 16;
2242
2243 switch(action & 0xf0000000) {
2244 case PHY_READ:
2245 case PHY_DATA_OR:
2246 case PHY_DATA_AND:
2247 case PHY_READ_EFUSE:
2248 case PHY_CLEAR_READCOUNT:
2249 case PHY_WRITE:
2250 case PHY_WRITE_PREVIOUS:
2251 case PHY_DELAY_MS:
2252 break;
2253
2254 case PHY_BJMPN:
2255 if (regno > index) {
2256 netif_err(tp, ifup, tp->dev,
2257 "Out of range of firmware\n");
2258 goto out;
2259 }
2260 break;
2261 case PHY_READCOUNT_EQ_SKIP:
2262 if (index + 2 >= pa->size) {
2263 netif_err(tp, ifup, tp->dev,
2264 "Out of range of firmware\n");
2265 goto out;
2266 }
2267 break;
2268 case PHY_COMP_EQ_SKIPN:
2269 case PHY_COMP_NEQ_SKIPN:
2270 case PHY_SKIPN:
2271 if (index + 1 + regno >= pa->size) {
2272 netif_err(tp, ifup, tp->dev,
2273 "Out of range of firmware\n");
2274 goto out;
2275 }
2276 break;
2277
2278 case PHY_READ_MAC_BYTE:
2279 case PHY_WRITE_MAC_BYTE:
2280 case PHY_WRITE_ERI_WORD:
2281 default:
2282 netif_err(tp, ifup, tp->dev,
2283 "Invalid action 0x%08x\n", action);
2284 goto out;
2285 }
2286 }
2287 rc = true;
2288out:
2289 return rc;
2290}
2291
2292static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2293{
2294 struct net_device *dev = tp->dev;
2295 int rc = -EINVAL;
2296
2297 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2298 netif_err(tp, ifup, dev, "invalid firwmare\n");
2299 goto out;
2300 }
2301
2302 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2303 rc = 0;
2304out:
2305 return rc;
2306}
2307
2308static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2309{
2310 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2311 u32 predata, count;
2312 size_t index;
2313
2314 predata = count = 0;
2315
2316 for (index = 0; index < pa->size; ) {
2317 u32 action = le32_to_cpu(pa->code[index]);
2318 u32 data = action & 0x0000ffff;
2319 u32 regno = (action & 0x0fff0000) >> 16;
2320
2321 if (!action)
2322 break;
2323
2324 switch(action & 0xf0000000) {
2325 case PHY_READ:
2326 predata = rtl_readphy(tp, regno);
2327 count++;
2328 index++;
2329 break;
2330 case PHY_DATA_OR:
2331 predata |= data;
2332 index++;
2333 break;
2334 case PHY_DATA_AND:
2335 predata &= data;
2336 index++;
2337 break;
2338 case PHY_BJMPN:
2339 index -= regno;
2340 break;
2341 case PHY_READ_EFUSE:
2342 predata = rtl8168d_efuse_read(tp, regno);
2343 index++;
2344 break;
2345 case PHY_CLEAR_READCOUNT:
2346 count = 0;
2347 index++;
2348 break;
2349 case PHY_WRITE:
2350 rtl_writephy(tp, regno, data);
2351 index++;
2352 break;
2353 case PHY_READCOUNT_EQ_SKIP:
2354 index += (count == data) ? 2 : 1;
2355 break;
2356 case PHY_COMP_EQ_SKIPN:
2357 if (predata == data)
2358 index += regno;
2359 index++;
2360 break;
2361 case PHY_COMP_NEQ_SKIPN:
2362 if (predata != data)
2363 index += regno;
2364 index++;
2365 break;
2366 case PHY_WRITE_PREVIOUS:
2367 rtl_writephy(tp, regno, predata);
2368 index++;
2369 break;
2370 case PHY_SKIPN:
2371 index += regno + 1;
2372 break;
2373 case PHY_DELAY_MS:
2374 mdelay(data);
2375 index++;
2376 break;
2377
2378 case PHY_READ_MAC_BYTE:
2379 case PHY_WRITE_MAC_BYTE:
2380 case PHY_WRITE_ERI_WORD:
2381 default:
2382 BUG();
2383 }
2384 }
2385}
2386
2387static void rtl_release_firmware(struct rtl8169_private *tp)
2388{
2389 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2390 release_firmware(tp->rtl_fw->fw);
2391 kfree(tp->rtl_fw);
2392 }
2393 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2394}
2395
2396static void rtl_apply_firmware(struct rtl8169_private *tp)
2397{
2398 struct rtl_fw *rtl_fw = tp->rtl_fw;
2399
2400 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2401 if (!IS_ERR_OR_NULL(rtl_fw))
2402 rtl_phy_write_fw(tp, rtl_fw);
2403}
2404
2405static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2406{
2407 if (rtl_readphy(tp, reg) != val)
2408 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2409 else
2410 rtl_apply_firmware(tp);
2411}
2412
2413static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2414{
2415 static const struct phy_reg phy_reg_init[] = {
2416 { 0x1f, 0x0001 },
2417 { 0x06, 0x006e },
2418 { 0x08, 0x0708 },
2419 { 0x15, 0x4000 },
2420 { 0x18, 0x65c7 },
2421
2422 { 0x1f, 0x0001 },
2423 { 0x03, 0x00a1 },
2424 { 0x02, 0x0008 },
2425 { 0x01, 0x0120 },
2426 { 0x00, 0x1000 },
2427 { 0x04, 0x0800 },
2428 { 0x04, 0x0000 },
2429
2430 { 0x03, 0xff41 },
2431 { 0x02, 0xdf60 },
2432 { 0x01, 0x0140 },
2433 { 0x00, 0x0077 },
2434 { 0x04, 0x7800 },
2435 { 0x04, 0x7000 },
2436
2437 { 0x03, 0x802f },
2438 { 0x02, 0x4f02 },
2439 { 0x01, 0x0409 },
2440 { 0x00, 0xf0f9 },
2441 { 0x04, 0x9800 },
2442 { 0x04, 0x9000 },
2443
2444 { 0x03, 0xdf01 },
2445 { 0x02, 0xdf20 },
2446 { 0x01, 0xff95 },
2447 { 0x00, 0xba00 },
2448 { 0x04, 0xa800 },
2449 { 0x04, 0xa000 },
2450
2451 { 0x03, 0xff41 },
2452 { 0x02, 0xdf20 },
2453 { 0x01, 0x0140 },
2454 { 0x00, 0x00bb },
2455 { 0x04, 0xb800 },
2456 { 0x04, 0xb000 },
2457
2458 { 0x03, 0xdf41 },
2459 { 0x02, 0xdc60 },
2460 { 0x01, 0x6340 },
2461 { 0x00, 0x007d },
2462 { 0x04, 0xd800 },
2463 { 0x04, 0xd000 },
2464
2465 { 0x03, 0xdf01 },
2466 { 0x02, 0xdf20 },
2467 { 0x01, 0x100a },
2468 { 0x00, 0xa0ff },
2469 { 0x04, 0xf800 },
2470 { 0x04, 0xf000 },
2471
2472 { 0x1f, 0x0000 },
2473 { 0x0b, 0x0000 },
2474 { 0x00, 0x9200 }
2475 };
2476
2477 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2478}
2479
2480static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2481{
2482 static const struct phy_reg phy_reg_init[] = {
2483 { 0x1f, 0x0002 },
2484 { 0x01, 0x90d0 },
2485 { 0x1f, 0x0000 }
2486 };
2487
2488 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2489}
2490
2491static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2492{
2493 struct pci_dev *pdev = tp->pci_dev;
2494
2495 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2496 (pdev->subsystem_device != 0xe000))
2497 return;
2498
2499 rtl_writephy(tp, 0x1f, 0x0001);
2500 rtl_writephy(tp, 0x10, 0xf01b);
2501 rtl_writephy(tp, 0x1f, 0x0000);
2502}
2503
2504static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2505{
2506 static const struct phy_reg phy_reg_init[] = {
2507 { 0x1f, 0x0001 },
2508 { 0x04, 0x0000 },
2509 { 0x03, 0x00a1 },
2510 { 0x02, 0x0008 },
2511 { 0x01, 0x0120 },
2512 { 0x00, 0x1000 },
2513 { 0x04, 0x0800 },
2514 { 0x04, 0x9000 },
2515 { 0x03, 0x802f },
2516 { 0x02, 0x4f02 },
2517 { 0x01, 0x0409 },
2518 { 0x00, 0xf099 },
2519 { 0x04, 0x9800 },
2520 { 0x04, 0xa000 },
2521 { 0x03, 0xdf01 },
2522 { 0x02, 0xdf20 },
2523 { 0x01, 0xff95 },
2524 { 0x00, 0xba00 },
2525 { 0x04, 0xa800 },
2526 { 0x04, 0xf000 },
2527 { 0x03, 0xdf01 },
2528 { 0x02, 0xdf20 },
2529 { 0x01, 0x101a },
2530 { 0x00, 0xa0ff },
2531 { 0x04, 0xf800 },
2532 { 0x04, 0x0000 },
2533 { 0x1f, 0x0000 },
2534
2535 { 0x1f, 0x0001 },
2536 { 0x10, 0xf41b },
2537 { 0x14, 0xfb54 },
2538 { 0x18, 0xf5c7 },
2539 { 0x1f, 0x0000 },
2540
2541 { 0x1f, 0x0001 },
2542 { 0x17, 0x0cc0 },
2543 { 0x1f, 0x0000 }
2544 };
2545
2546 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2547
2548 rtl8169scd_hw_phy_config_quirk(tp);
2549}
2550
2551static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2552{
2553 static const struct phy_reg phy_reg_init[] = {
2554 { 0x1f, 0x0001 },
2555 { 0x04, 0x0000 },
2556 { 0x03, 0x00a1 },
2557 { 0x02, 0x0008 },
2558 { 0x01, 0x0120 },
2559 { 0x00, 0x1000 },
2560 { 0x04, 0x0800 },
2561 { 0x04, 0x9000 },
2562 { 0x03, 0x802f },
2563 { 0x02, 0x4f02 },
2564 { 0x01, 0x0409 },
2565 { 0x00, 0xf099 },
2566 { 0x04, 0x9800 },
2567 { 0x04, 0xa000 },
2568 { 0x03, 0xdf01 },
2569 { 0x02, 0xdf20 },
2570 { 0x01, 0xff95 },
2571 { 0x00, 0xba00 },
2572 { 0x04, 0xa800 },
2573 { 0x04, 0xf000 },
2574 { 0x03, 0xdf01 },
2575 { 0x02, 0xdf20 },
2576 { 0x01, 0x101a },
2577 { 0x00, 0xa0ff },
2578 { 0x04, 0xf800 },
2579 { 0x04, 0x0000 },
2580 { 0x1f, 0x0000 },
2581
2582 { 0x1f, 0x0001 },
2583 { 0x0b, 0x8480 },
2584 { 0x1f, 0x0000 },
2585
2586 { 0x1f, 0x0001 },
2587 { 0x18, 0x67c7 },
2588 { 0x04, 0x2000 },
2589 { 0x03, 0x002f },
2590 { 0x02, 0x4360 },
2591 { 0x01, 0x0109 },
2592 { 0x00, 0x3022 },
2593 { 0x04, 0x2800 },
2594 { 0x1f, 0x0000 },
2595
2596 { 0x1f, 0x0001 },
2597 { 0x17, 0x0cc0 },
2598 { 0x1f, 0x0000 }
2599 };
2600
2601 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2602}
2603
2604static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2605{
2606 static const struct phy_reg phy_reg_init[] = {
2607 { 0x10, 0xf41b },
2608 { 0x1f, 0x0000 }
2609 };
2610
2611 rtl_writephy(tp, 0x1f, 0x0001);
2612 rtl_patchphy(tp, 0x16, 1 << 0);
2613
2614 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2615}
2616
2617static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2618{
2619 static const struct phy_reg phy_reg_init[] = {
2620 { 0x1f, 0x0001 },
2621 { 0x10, 0xf41b },
2622 { 0x1f, 0x0000 }
2623 };
2624
2625 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2626}
2627
2628static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2629{
2630 static const struct phy_reg phy_reg_init[] = {
2631 { 0x1f, 0x0000 },
2632 { 0x1d, 0x0f00 },
2633 { 0x1f, 0x0002 },
2634 { 0x0c, 0x1ec8 },
2635 { 0x1f, 0x0000 }
2636 };
2637
2638 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2639}
2640
2641static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2642{
2643 static const struct phy_reg phy_reg_init[] = {
2644 { 0x1f, 0x0001 },
2645 { 0x1d, 0x3d98 },
2646 { 0x1f, 0x0000 }
2647 };
2648
2649 rtl_writephy(tp, 0x1f, 0x0000);
2650 rtl_patchphy(tp, 0x14, 1 << 5);
2651 rtl_patchphy(tp, 0x0d, 1 << 5);
2652
2653 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2654}
2655
2656static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2657{
2658 static const struct phy_reg phy_reg_init[] = {
2659 { 0x1f, 0x0001 },
2660 { 0x12, 0x2300 },
2661 { 0x1f, 0x0002 },
2662 { 0x00, 0x88d4 },
2663 { 0x01, 0x82b1 },
2664 { 0x03, 0x7002 },
2665 { 0x08, 0x9e30 },
2666 { 0x09, 0x01f0 },
2667 { 0x0a, 0x5500 },
2668 { 0x0c, 0x00c8 },
2669 { 0x1f, 0x0003 },
2670 { 0x12, 0xc096 },
2671 { 0x16, 0x000a },
2672 { 0x1f, 0x0000 },
2673 { 0x1f, 0x0000 },
2674 { 0x09, 0x2000 },
2675 { 0x09, 0x0000 }
2676 };
2677
2678 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2679
2680 rtl_patchphy(tp, 0x14, 1 << 5);
2681 rtl_patchphy(tp, 0x0d, 1 << 5);
2682 rtl_writephy(tp, 0x1f, 0x0000);
2683}
2684
2685static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2686{
2687 static const struct phy_reg phy_reg_init[] = {
2688 { 0x1f, 0x0001 },
2689 { 0x12, 0x2300 },
2690 { 0x03, 0x802f },
2691 { 0x02, 0x4f02 },
2692 { 0x01, 0x0409 },
2693 { 0x00, 0xf099 },
2694 { 0x04, 0x9800 },
2695 { 0x04, 0x9000 },
2696 { 0x1d, 0x3d98 },
2697 { 0x1f, 0x0002 },
2698 { 0x0c, 0x7eb8 },
2699 { 0x06, 0x0761 },
2700 { 0x1f, 0x0003 },
2701 { 0x16, 0x0f0a },
2702 { 0x1f, 0x0000 }
2703 };
2704
2705 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2706
2707 rtl_patchphy(tp, 0x16, 1 << 0);
2708 rtl_patchphy(tp, 0x14, 1 << 5);
2709 rtl_patchphy(tp, 0x0d, 1 << 5);
2710 rtl_writephy(tp, 0x1f, 0x0000);
2711}
2712
2713static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2714{
2715 static const struct phy_reg phy_reg_init[] = {
2716 { 0x1f, 0x0001 },
2717 { 0x12, 0x2300 },
2718 { 0x1d, 0x3d98 },
2719 { 0x1f, 0x0002 },
2720 { 0x0c, 0x7eb8 },
2721 { 0x06, 0x5461 },
2722 { 0x1f, 0x0003 },
2723 { 0x16, 0x0f0a },
2724 { 0x1f, 0x0000 }
2725 };
2726
2727 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2728
2729 rtl_patchphy(tp, 0x16, 1 << 0);
2730 rtl_patchphy(tp, 0x14, 1 << 5);
2731 rtl_patchphy(tp, 0x0d, 1 << 5);
2732 rtl_writephy(tp, 0x1f, 0x0000);
2733}
2734
2735static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
2736{
2737 rtl8168c_3_hw_phy_config(tp);
2738}
2739
2740static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2741{
2742 static const struct phy_reg phy_reg_init_0[] = {
2743 /* Channel Estimation */
2744 { 0x1f, 0x0001 },
2745 { 0x06, 0x4064 },
2746 { 0x07, 0x2863 },
2747 { 0x08, 0x059c },
2748 { 0x09, 0x26b4 },
2749 { 0x0a, 0x6a19 },
2750 { 0x0b, 0xdcc8 },
2751 { 0x10, 0xf06d },
2752 { 0x14, 0x7f68 },
2753 { 0x18, 0x7fd9 },
2754 { 0x1c, 0xf0ff },
2755 { 0x1d, 0x3d9c },
2756 { 0x1f, 0x0003 },
2757 { 0x12, 0xf49f },
2758 { 0x13, 0x070b },
2759 { 0x1a, 0x05ad },
2760 { 0x14, 0x94c0 },
2761
2762 /*
2763 * Tx Error Issue
2764 * Enhance line driver power
2765 */
2766 { 0x1f, 0x0002 },
2767 { 0x06, 0x5561 },
2768 { 0x1f, 0x0005 },
2769 { 0x05, 0x8332 },
2770 { 0x06, 0x5561 },
2771
2772 /*
2773 * Can not link to 1Gbps with bad cable
2774 * Decrease SNR threshold form 21.07dB to 19.04dB
2775 */
2776 { 0x1f, 0x0001 },
2777 { 0x17, 0x0cc0 },
2778
2779 { 0x1f, 0x0000 },
2780 { 0x0d, 0xf880 }
2781 };
2782
2783 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2784
2785 /*
2786 * Rx Error Issue
2787 * Fine Tune Switching regulator parameter
2788 */
2789 rtl_writephy(tp, 0x1f, 0x0002);
2790 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2791 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2792
2793 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2794 static const struct phy_reg phy_reg_init[] = {
2795 { 0x1f, 0x0002 },
2796 { 0x05, 0x669a },
2797 { 0x1f, 0x0005 },
2798 { 0x05, 0x8330 },
2799 { 0x06, 0x669a },
2800 { 0x1f, 0x0002 }
2801 };
2802 int val;
2803
2804 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2805
2806 val = rtl_readphy(tp, 0x0d);
2807
2808 if ((val & 0x00ff) != 0x006c) {
2809 static const u32 set[] = {
2810 0x0065, 0x0066, 0x0067, 0x0068,
2811 0x0069, 0x006a, 0x006b, 0x006c
2812 };
2813 int i;
2814
2815 rtl_writephy(tp, 0x1f, 0x0002);
2816
2817 val &= 0xff00;
2818 for (i = 0; i < ARRAY_SIZE(set); i++)
2819 rtl_writephy(tp, 0x0d, val | set[i]);
2820 }
2821 } else {
2822 static const struct phy_reg phy_reg_init[] = {
2823 { 0x1f, 0x0002 },
2824 { 0x05, 0x6662 },
2825 { 0x1f, 0x0005 },
2826 { 0x05, 0x8330 },
2827 { 0x06, 0x6662 }
2828 };
2829
2830 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2831 }
2832
2833 /* RSET couple improve */
2834 rtl_writephy(tp, 0x1f, 0x0002);
2835 rtl_patchphy(tp, 0x0d, 0x0300);
2836 rtl_patchphy(tp, 0x0f, 0x0010);
2837
2838 /* Fine tune PLL performance */
2839 rtl_writephy(tp, 0x1f, 0x0002);
2840 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2841 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2842
2843 rtl_writephy(tp, 0x1f, 0x0005);
2844 rtl_writephy(tp, 0x05, 0x001b);
2845
2846 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2847
2848 rtl_writephy(tp, 0x1f, 0x0000);
2849}
2850
2851static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2852{
2853 static const struct phy_reg phy_reg_init_0[] = {
2854 /* Channel Estimation */
2855 { 0x1f, 0x0001 },
2856 { 0x06, 0x4064 },
2857 { 0x07, 0x2863 },
2858 { 0x08, 0x059c },
2859 { 0x09, 0x26b4 },
2860 { 0x0a, 0x6a19 },
2861 { 0x0b, 0xdcc8 },
2862 { 0x10, 0xf06d },
2863 { 0x14, 0x7f68 },
2864 { 0x18, 0x7fd9 },
2865 { 0x1c, 0xf0ff },
2866 { 0x1d, 0x3d9c },
2867 { 0x1f, 0x0003 },
2868 { 0x12, 0xf49f },
2869 { 0x13, 0x070b },
2870 { 0x1a, 0x05ad },
2871 { 0x14, 0x94c0 },
2872
2873 /*
2874 * Tx Error Issue
2875 * Enhance line driver power
2876 */
2877 { 0x1f, 0x0002 },
2878 { 0x06, 0x5561 },
2879 { 0x1f, 0x0005 },
2880 { 0x05, 0x8332 },
2881 { 0x06, 0x5561 },
2882
2883 /*
2884 * Can not link to 1Gbps with bad cable
2885 * Decrease SNR threshold form 21.07dB to 19.04dB
2886 */
2887 { 0x1f, 0x0001 },
2888 { 0x17, 0x0cc0 },
2889
2890 { 0x1f, 0x0000 },
2891 { 0x0d, 0xf880 }
2892 };
2893
2894 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2895
2896 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2897 static const struct phy_reg phy_reg_init[] = {
2898 { 0x1f, 0x0002 },
2899 { 0x05, 0x669a },
2900 { 0x1f, 0x0005 },
2901 { 0x05, 0x8330 },
2902 { 0x06, 0x669a },
2903
2904 { 0x1f, 0x0002 }
2905 };
2906 int val;
2907
2908 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2909
2910 val = rtl_readphy(tp, 0x0d);
2911 if ((val & 0x00ff) != 0x006c) {
2912 static const u32 set[] = {
2913 0x0065, 0x0066, 0x0067, 0x0068,
2914 0x0069, 0x006a, 0x006b, 0x006c
2915 };
2916 int i;
2917
2918 rtl_writephy(tp, 0x1f, 0x0002);
2919
2920 val &= 0xff00;
2921 for (i = 0; i < ARRAY_SIZE(set); i++)
2922 rtl_writephy(tp, 0x0d, val | set[i]);
2923 }
2924 } else {
2925 static const struct phy_reg phy_reg_init[] = {
2926 { 0x1f, 0x0002 },
2927 { 0x05, 0x2642 },
2928 { 0x1f, 0x0005 },
2929 { 0x05, 0x8330 },
2930 { 0x06, 0x2642 }
2931 };
2932
2933 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2934 }
2935
2936 /* Fine tune PLL performance */
2937 rtl_writephy(tp, 0x1f, 0x0002);
2938 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2939 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2940
2941 /* Switching regulator Slew rate */
2942 rtl_writephy(tp, 0x1f, 0x0002);
2943 rtl_patchphy(tp, 0x0f, 0x0017);
2944
2945 rtl_writephy(tp, 0x1f, 0x0005);
2946 rtl_writephy(tp, 0x05, 0x001b);
2947
2948 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2949
2950 rtl_writephy(tp, 0x1f, 0x0000);
2951}
2952
2953static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2954{
2955 static const struct phy_reg phy_reg_init[] = {
2956 { 0x1f, 0x0002 },
2957 { 0x10, 0x0008 },
2958 { 0x0d, 0x006c },
2959
2960 { 0x1f, 0x0000 },
2961 { 0x0d, 0xf880 },
2962
2963 { 0x1f, 0x0001 },
2964 { 0x17, 0x0cc0 },
2965
2966 { 0x1f, 0x0001 },
2967 { 0x0b, 0xa4d8 },
2968 { 0x09, 0x281c },
2969 { 0x07, 0x2883 },
2970 { 0x0a, 0x6b35 },
2971 { 0x1d, 0x3da4 },
2972 { 0x1c, 0xeffd },
2973 { 0x14, 0x7f52 },
2974 { 0x18, 0x7fc6 },
2975 { 0x08, 0x0601 },
2976 { 0x06, 0x4063 },
2977 { 0x10, 0xf074 },
2978 { 0x1f, 0x0003 },
2979 { 0x13, 0x0789 },
2980 { 0x12, 0xf4bd },
2981 { 0x1a, 0x04fd },
2982 { 0x14, 0x84b0 },
2983 { 0x1f, 0x0000 },
2984 { 0x00, 0x9200 },
2985
2986 { 0x1f, 0x0005 },
2987 { 0x01, 0x0340 },
2988 { 0x1f, 0x0001 },
2989 { 0x04, 0x4000 },
2990 { 0x03, 0x1d21 },
2991 { 0x02, 0x0c32 },
2992 { 0x01, 0x0200 },
2993 { 0x00, 0x5554 },
2994 { 0x04, 0x4800 },
2995 { 0x04, 0x4000 },
2996 { 0x04, 0xf000 },
2997 { 0x03, 0xdf01 },
2998 { 0x02, 0xdf20 },
2999 { 0x01, 0x101a },
3000 { 0x00, 0xa0ff },
3001 { 0x04, 0xf800 },
3002 { 0x04, 0xf000 },
3003 { 0x1f, 0x0000 },
3004
3005 { 0x1f, 0x0007 },
3006 { 0x1e, 0x0023 },
3007 { 0x16, 0x0000 },
3008 { 0x1f, 0x0000 }
3009 };
3010
3011 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3012}
3013
3014static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3015{
3016 static const struct phy_reg phy_reg_init[] = {
3017 { 0x1f, 0x0001 },
3018 { 0x17, 0x0cc0 },
3019
3020 { 0x1f, 0x0007 },
3021 { 0x1e, 0x002d },
3022 { 0x18, 0x0040 },
3023 { 0x1f, 0x0000 }
3024 };
3025
3026 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3027 rtl_patchphy(tp, 0x0d, 1 << 5);
3028}
3029
3030static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3031{
3032 static const struct phy_reg phy_reg_init[] = {
3033 /* Enable Delay cap */
3034 { 0x1f, 0x0005 },
3035 { 0x05, 0x8b80 },
3036 { 0x06, 0xc896 },
3037 { 0x1f, 0x0000 },
3038
3039 /* Channel estimation fine tune */
3040 { 0x1f, 0x0001 },
3041 { 0x0b, 0x6c20 },
3042 { 0x07, 0x2872 },
3043 { 0x1c, 0xefff },
3044 { 0x1f, 0x0003 },
3045 { 0x14, 0x6420 },
3046 { 0x1f, 0x0000 },
3047
3048 /* Update PFM & 10M TX idle timer */
3049 { 0x1f, 0x0007 },
3050 { 0x1e, 0x002f },
3051 { 0x15, 0x1919 },
3052 { 0x1f, 0x0000 },
3053
3054 { 0x1f, 0x0007 },
3055 { 0x1e, 0x00ac },
3056 { 0x18, 0x0006 },
3057 { 0x1f, 0x0000 }
3058 };
3059
3060 rtl_apply_firmware(tp);
3061
3062 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3063
3064 /* DCO enable for 10M IDLE Power */
3065 rtl_writephy(tp, 0x1f, 0x0007);
3066 rtl_writephy(tp, 0x1e, 0x0023);
3067 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3068 rtl_writephy(tp, 0x1f, 0x0000);
3069
3070 /* For impedance matching */
3071 rtl_writephy(tp, 0x1f, 0x0002);
3072 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3073 rtl_writephy(tp, 0x1f, 0x0000);
3074
3075 /* PHY auto speed down */
3076 rtl_writephy(tp, 0x1f, 0x0007);
3077 rtl_writephy(tp, 0x1e, 0x002d);
3078 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3079 rtl_writephy(tp, 0x1f, 0x0000);
3080 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3081
3082 rtl_writephy(tp, 0x1f, 0x0005);
3083 rtl_writephy(tp, 0x05, 0x8b86);
3084 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3085 rtl_writephy(tp, 0x1f, 0x0000);
3086
3087 rtl_writephy(tp, 0x1f, 0x0005);
3088 rtl_writephy(tp, 0x05, 0x8b85);
3089 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3090 rtl_writephy(tp, 0x1f, 0x0007);
3091 rtl_writephy(tp, 0x1e, 0x0020);
3092 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3093 rtl_writephy(tp, 0x1f, 0x0006);
3094 rtl_writephy(tp, 0x00, 0x5a00);
3095 rtl_writephy(tp, 0x1f, 0x0000);
3096 rtl_writephy(tp, 0x0d, 0x0007);
3097 rtl_writephy(tp, 0x0e, 0x003c);
3098 rtl_writephy(tp, 0x0d, 0x4007);
3099 rtl_writephy(tp, 0x0e, 0x0000);
3100 rtl_writephy(tp, 0x0d, 0x0000);
3101}
3102
3103static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3104{
3105 static const struct phy_reg phy_reg_init[] = {
3106 /* Enable Delay cap */
3107 { 0x1f, 0x0004 },
3108 { 0x1f, 0x0007 },
3109 { 0x1e, 0x00ac },
3110 { 0x18, 0x0006 },
3111 { 0x1f, 0x0002 },
3112 { 0x1f, 0x0000 },
3113 { 0x1f, 0x0000 },
3114
3115 /* Channel estimation fine tune */
3116 { 0x1f, 0x0003 },
3117 { 0x09, 0xa20f },
3118 { 0x1f, 0x0000 },
3119 { 0x1f, 0x0000 },
3120
3121 /* Green Setting */
3122 { 0x1f, 0x0005 },
3123 { 0x05, 0x8b5b },
3124 { 0x06, 0x9222 },
3125 { 0x05, 0x8b6d },
3126 { 0x06, 0x8000 },
3127 { 0x05, 0x8b76 },
3128 { 0x06, 0x8000 },
3129 { 0x1f, 0x0000 }
3130 };
3131
3132 rtl_apply_firmware(tp);
3133
3134 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3135
3136 /* For 4-corner performance improve */
3137 rtl_writephy(tp, 0x1f, 0x0005);
3138 rtl_writephy(tp, 0x05, 0x8b80);
3139 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3140 rtl_writephy(tp, 0x1f, 0x0000);
3141
3142 /* PHY auto speed down */
3143 rtl_writephy(tp, 0x1f, 0x0004);
3144 rtl_writephy(tp, 0x1f, 0x0007);
3145 rtl_writephy(tp, 0x1e, 0x002d);
3146 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3147 rtl_writephy(tp, 0x1f, 0x0002);
3148 rtl_writephy(tp, 0x1f, 0x0000);
3149 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3150
3151 /* improve 10M EEE waveform */
3152 rtl_writephy(tp, 0x1f, 0x0005);
3153 rtl_writephy(tp, 0x05, 0x8b86);
3154 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3155 rtl_writephy(tp, 0x1f, 0x0000);
3156
3157 /* Improve 2-pair detection performance */
3158 rtl_writephy(tp, 0x1f, 0x0005);
3159 rtl_writephy(tp, 0x05, 0x8b85);
3160 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3161 rtl_writephy(tp, 0x1f, 0x0000);
3162
3163 /* EEE setting */
3164 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3165 rtl_writephy(tp, 0x1f, 0x0005);
3166 rtl_writephy(tp, 0x05, 0x8b85);
3167 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3168 rtl_writephy(tp, 0x1f, 0x0004);
3169 rtl_writephy(tp, 0x1f, 0x0007);
3170 rtl_writephy(tp, 0x1e, 0x0020);
3171 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3172 rtl_writephy(tp, 0x1f, 0x0002);
3173 rtl_writephy(tp, 0x1f, 0x0000);
3174 rtl_writephy(tp, 0x0d, 0x0007);
3175 rtl_writephy(tp, 0x0e, 0x003c);
3176 rtl_writephy(tp, 0x0d, 0x4007);
3177 rtl_writephy(tp, 0x0e, 0x0000);
3178 rtl_writephy(tp, 0x0d, 0x0000);
3179
3180 /* Green feature */
3181 rtl_writephy(tp, 0x1f, 0x0003);
3182 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3183 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3184 rtl_writephy(tp, 0x1f, 0x0000);
3185}
3186
3187static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3188{
3189 /* For 4-corner performance improve */
3190 rtl_writephy(tp, 0x1f, 0x0005);
3191 rtl_writephy(tp, 0x05, 0x8b80);
3192 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3193 rtl_writephy(tp, 0x1f, 0x0000);
3194
3195 /* PHY auto speed down */
3196 rtl_writephy(tp, 0x1f, 0x0007);
3197 rtl_writephy(tp, 0x1e, 0x002d);
3198 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3199 rtl_writephy(tp, 0x1f, 0x0000);
3200 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3201
3202 /* Improve 10M EEE waveform */
3203 rtl_writephy(tp, 0x1f, 0x0005);
3204 rtl_writephy(tp, 0x05, 0x8b86);
3205 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3206 rtl_writephy(tp, 0x1f, 0x0000);
3207}
3208
3209static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3210{
3211 static const struct phy_reg phy_reg_init[] = {
3212 /* Channel estimation fine tune */
3213 { 0x1f, 0x0003 },
3214 { 0x09, 0xa20f },
3215 { 0x1f, 0x0000 },
3216
3217 /* Modify green table for giga & fnet */
3218 { 0x1f, 0x0005 },
3219 { 0x05, 0x8b55 },
3220 { 0x06, 0x0000 },
3221 { 0x05, 0x8b5e },
3222 { 0x06, 0x0000 },
3223 { 0x05, 0x8b67 },
3224 { 0x06, 0x0000 },
3225 { 0x05, 0x8b70 },
3226 { 0x06, 0x0000 },
3227 { 0x1f, 0x0000 },
3228 { 0x1f, 0x0007 },
3229 { 0x1e, 0x0078 },
3230 { 0x17, 0x0000 },
3231 { 0x19, 0x00fb },
3232 { 0x1f, 0x0000 },
3233
3234 /* Modify green table for 10M */
3235 { 0x1f, 0x0005 },
3236 { 0x05, 0x8b79 },
3237 { 0x06, 0xaa00 },
3238 { 0x1f, 0x0000 },
3239
3240 /* Disable hiimpedance detection (RTCT) */
3241 { 0x1f, 0x0003 },
3242 { 0x01, 0x328a },
3243 { 0x1f, 0x0000 }
3244 };
3245
3246 rtl_apply_firmware(tp);
3247
3248 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3249
3250 rtl8168f_hw_phy_config(tp);
3251
3252 /* Improve 2-pair detection performance */
3253 rtl_writephy(tp, 0x1f, 0x0005);
3254 rtl_writephy(tp, 0x05, 0x8b85);
3255 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3256 rtl_writephy(tp, 0x1f, 0x0000);
3257}
3258
3259static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3260{
3261 rtl_apply_firmware(tp);
3262
3263 rtl8168f_hw_phy_config(tp);
3264}
3265
3266static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3267{
3268 static const struct phy_reg phy_reg_init[] = {
3269 /* Channel estimation fine tune */
3270 { 0x1f, 0x0003 },
3271 { 0x09, 0xa20f },
3272 { 0x1f, 0x0000 },
3273
3274 /* Modify green table for giga & fnet */
3275 { 0x1f, 0x0005 },
3276 { 0x05, 0x8b55 },
3277 { 0x06, 0x0000 },
3278 { 0x05, 0x8b5e },
3279 { 0x06, 0x0000 },
3280 { 0x05, 0x8b67 },
3281 { 0x06, 0x0000 },
3282 { 0x05, 0x8b70 },
3283 { 0x06, 0x0000 },
3284 { 0x1f, 0x0000 },
3285 { 0x1f, 0x0007 },
3286 { 0x1e, 0x0078 },
3287 { 0x17, 0x0000 },
3288 { 0x19, 0x00aa },
3289 { 0x1f, 0x0000 },
3290
3291 /* Modify green table for 10M */
3292 { 0x1f, 0x0005 },
3293 { 0x05, 0x8b79 },
3294 { 0x06, 0xaa00 },
3295 { 0x1f, 0x0000 },
3296
3297 /* Disable hiimpedance detection (RTCT) */
3298 { 0x1f, 0x0003 },
3299 { 0x01, 0x328a },
3300 { 0x1f, 0x0000 }
3301 };
3302
3303
3304 rtl_apply_firmware(tp);
3305
3306 rtl8168f_hw_phy_config(tp);
3307
3308 /* Improve 2-pair detection performance */
3309 rtl_writephy(tp, 0x1f, 0x0005);
3310 rtl_writephy(tp, 0x05, 0x8b85);
3311 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3312 rtl_writephy(tp, 0x1f, 0x0000);
3313
3314 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3315
3316 /* Modify green table for giga */
3317 rtl_writephy(tp, 0x1f, 0x0005);
3318 rtl_writephy(tp, 0x05, 0x8b54);
3319 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3320 rtl_writephy(tp, 0x05, 0x8b5d);
3321 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3322 rtl_writephy(tp, 0x05, 0x8a7c);
3323 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3324 rtl_writephy(tp, 0x05, 0x8a7f);
3325 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3326 rtl_writephy(tp, 0x05, 0x8a82);
3327 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3328 rtl_writephy(tp, 0x05, 0x8a85);
3329 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3330 rtl_writephy(tp, 0x05, 0x8a88);
3331 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3332 rtl_writephy(tp, 0x1f, 0x0000);
3333
3334 /* uc same-seed solution */
3335 rtl_writephy(tp, 0x1f, 0x0005);
3336 rtl_writephy(tp, 0x05, 0x8b85);
3337 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3338 rtl_writephy(tp, 0x1f, 0x0000);
3339
3340 /* eee setting */
3341 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3342 rtl_writephy(tp, 0x1f, 0x0005);
3343 rtl_writephy(tp, 0x05, 0x8b85);
3344 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3345 rtl_writephy(tp, 0x1f, 0x0004);
3346 rtl_writephy(tp, 0x1f, 0x0007);
3347 rtl_writephy(tp, 0x1e, 0x0020);
3348 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3349 rtl_writephy(tp, 0x1f, 0x0000);
3350 rtl_writephy(tp, 0x0d, 0x0007);
3351 rtl_writephy(tp, 0x0e, 0x003c);
3352 rtl_writephy(tp, 0x0d, 0x4007);
3353 rtl_writephy(tp, 0x0e, 0x0000);
3354 rtl_writephy(tp, 0x0d, 0x0000);
3355
3356 /* Green feature */
3357 rtl_writephy(tp, 0x1f, 0x0003);
3358 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3359 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3360 rtl_writephy(tp, 0x1f, 0x0000);
3361}
3362
3363static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3364{
3365 static const u16 mac_ocp_patch[] = {
3366 0xe008, 0xe01b, 0xe01d, 0xe01f,
3367 0xe021, 0xe023, 0xe025, 0xe027,
3368 0x49d2, 0xf10d, 0x766c, 0x49e2,
3369 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3370
3371 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3372 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3373 0xbe00, 0xb416, 0x0076, 0xe86c,
3374 0xc602, 0xbe00, 0x0000, 0xc602,
3375
3376 0xbe00, 0x0000, 0xc602, 0xbe00,
3377 0x0000, 0xc602, 0xbe00, 0x0000,
3378 0xc602, 0xbe00, 0x0000, 0xc602,
3379 0xbe00, 0x0000, 0xc602, 0xbe00,
3380
3381 0x0000, 0x0000, 0x0000, 0x0000
3382 };
3383 u32 i;
3384
3385 /* Patch code for GPHY reset */
3386 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3387 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3388 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3389 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3390
3391 rtl_apply_firmware(tp);
3392
3393 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3394 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3395 else
3396 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3397
3398 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3399 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3400 else
3401 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3402
3403 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3404 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3405
3406 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3407 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3408
3409 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
3410}
3411
3412static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3413{
3414 static const struct phy_reg phy_reg_init[] = {
3415 { 0x1f, 0x0003 },
3416 { 0x08, 0x441d },
3417 { 0x01, 0x9100 },
3418 { 0x1f, 0x0000 }
3419 };
3420
3421 rtl_writephy(tp, 0x1f, 0x0000);
3422 rtl_patchphy(tp, 0x11, 1 << 12);
3423 rtl_patchphy(tp, 0x19, 1 << 13);
3424 rtl_patchphy(tp, 0x10, 1 << 15);
3425
3426 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3427}
3428
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	/*
	 * PHY initialization for the RTL8105E (mac versions 29/30):
	 * disable ALDPS, load the firmware patch, then apply the
	 * vendor register sequence below.
	 */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3454
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/*
	 * PHY initialization for the RTL8402 (mac version 37):
	 * ALDPS off, firmware load, then EEE configuration.
	 */

	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3471
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	/*
	 * PHY initialization for the RTL8106E (mac version 39):
	 * ALDPS off, firmware load, then EEE-related ERI/PHY writes.
	 */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
3493
/*
 * Dispatch to the chip-specific PHY configuration routine based on the
 * detected mac_version.  Versions without an entry (VER_01, VER_31,
 * VER_41 and anything unknown) get no PHY tweaks.
 */
static void rtl_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_print_mac_version(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
		/* No PHY configuration needed. */
		break;
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
		rtl8169s_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_04:
		rtl8169sb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_05:
		rtl8169scd_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_06:
		rtl8169sce_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
		rtl8102e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_11:
		rtl8168bb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_17:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_18:
		rtl8168cp_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_19:
		rtl8168c_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_20:
		rtl8168c_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_21:
		rtl8168c_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_22:
		rtl8168c_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		rtl8168cp_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_25:
		rtl8168d_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_26:
		rtl8168d_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_27:
		rtl8168d_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_28:
		rtl8168d_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		rtl8105e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_31:
		/* None. */
		break;
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl8168e_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl8168e_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_35:
		rtl8168f_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_36:
		rtl8168f_2_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl8402_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl8411_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl8106e_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
		rtl8168g_1_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_41:
	default:
		/* No PHY configuration known for these versions. */
		break;
	}
}
3603
3604static void rtl_phy_work(struct rtl8169_private *tp)
3605{
3606 struct timer_list *timer = &tp->timer;
3607 void __iomem *ioaddr = tp->mmio_addr;
3608 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3609
3610 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3611
3612 if (tp->phy_reset_pending(tp)) {
3613 /*
3614 * A busy loop could burn quite a few cycles on nowadays CPU.
3615 * Let's delay the execution of the timer for a few ticks.
3616 */
3617 timeout = HZ/10;
3618 goto out_mod_timer;
3619 }
3620
3621 if (tp->link_ok(ioaddr))
3622 return;
3623
3624 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3625
3626 tp->phy_reset_enable(tp);
3627
3628out_mod_timer:
3629 mod_timer(timer, jiffies + timeout);
3630}
3631
3632static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3633{
3634 if (!test_and_set_bit(flag, tp->wk.flags))
3635 schedule_work(&tp->wk.work);
3636}
3637
3638static void rtl8169_phy_timer(unsigned long __opaque)
3639{
3640 struct net_device *dev = (struct net_device *)__opaque;
3641 struct rtl8169_private *tp = netdev_priv(dev);
3642
3643 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3644}
3645
/*
 * Undo probe-time setup in reverse order: unmap the BAR, release the
 * PCI regions, clear MWI, disable the device and free the netdev.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3655
/* Poll condition: true while the PHY reset has not yet completed. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3660
/* Trigger a PHY reset and wait (up to 100 ms) for it to complete. */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3667
3668static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3669{
3670 void __iomem *ioaddr = tp->mmio_addr;
3671
3672 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3673 (RTL_R8(PHYstatus) & TBI_Enable);
3674}
3675
/*
 * Full PHY bring-up: chip-specific configuration, legacy MAC/PCI quirks
 * for the early (<= VER_06) chips, a PHY reset, and finally restart of
 * autonegotiation advertising all supported speeds.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 10/100 always, gigabit only when the PHY supports it. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3711
/*
 * Program the unicast MAC address into the MAC0/MAC4 registers (and,
 * for VER_34, mirror it into the EXGMAC registers).  The Cfg9346
 * unlock/lock pair is required to make MAC0/MAC4 writable.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	/* Split the 6-byte address into a low dword and a high word. */
	low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* Write high first; reads after each write flush the posted write. */
	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		const struct exgmac_reg e[] = {
			{ .addr = 0xe0, ERIAR_MASK_1111, .val = low },
			{ .addr = 0xe4, ERIAR_MASK_1111, .val = high },
			{ .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
			{ .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
								low  >> 16 },
		};

		rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3747
3748static int rtl_set_mac_address(struct net_device *dev, void *p)
3749{
3750 struct rtl8169_private *tp = netdev_priv(dev);
3751 struct sockaddr *addr = p;
3752
3753 if (!is_valid_ether_addr(addr->sa_data))
3754 return -EADDRNOTAVAIL;
3755
3756 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3757
3758 rtl_rar_set(tp, dev->dev_addr);
3759
3760 return 0;
3761}
3762
3763static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3764{
3765 struct rtl8169_private *tp = netdev_priv(dev);
3766 struct mii_ioctl_data *data = if_mii(ifr);
3767
3768 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3769}
3770
3771static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3772 struct mii_ioctl_data *data, int cmd)
3773{
3774 switch (cmd) {
3775 case SIOCGMIIPHY:
3776 data->phy_id = 32; /* Internal PHY */
3777 return 0;
3778
3779 case SIOCGMIIREG:
3780 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3781 return 0;
3782
3783 case SIOCSMIIREG:
3784 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3785 return 0;
3786 }
3787 return -EOPNOTSUPP;
3788}
3789
/* MII ioctls are not supported in TBI mode. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3794
3795static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3796{
3797 if (tp->features & RTL_FEATURE_MSI) {
3798 pci_disable_msi(pdev);
3799 tp->features &= ~RTL_FEATURE_MSI;
3800 }
3801}
3802
/*
 * Select the MDIO accessors matching the chip generation: the 8168DP
 * variants and the 8168G family use dedicated register interfaces,
 * everything else uses the classic PHYAR-based access.
 */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->write	= r8168g_mdio_write;
		ops->read	= r8168g_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
3828
/*
 * WoL suspend quirk: on the listed chips, keep accepting broadcast,
 * multicast and directed frames while suspended so wake packets are
 * seen by the hardware.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3851
/*
 * If any Wake-on-LAN source is armed, put the PHY in a low-power but
 * wake-capable state instead of a full power-down.  Returns true when
 * this WoL path was taken (caller must then skip the full power-down).
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;

	/* Clear BMCR: drop speed/duplex forcing so the link can renegotiate. */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, 0x0000);

	rtl_wol_suspend_quirk(tp);

	return true;
}
3864
/* Power the 810x PHY down via the BMCR power-down bit. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3870
/* Power the 810x PHY back up and re-enable autonegotiation. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3876
/*
 * 810x PLL power-down: prefer the WoL-capable low-power path when WoL
 * is armed; otherwise power the PHY down and, on chips that support it,
 * gate the PLL via the PMCH register.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* These chips have no PMCH PLL gating. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3899
/* 810x PLL power-up: bring the PHY up, then ungate the PLL via PMCH. */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* These chips have no PMCH PLL gating. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3919
/*
 * Power the 8168 PHY up: on the listed generations an extra write to
 * PHY register 0x0e is needed before re-enabling autonegotiation.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3946
/*
 * Power the 8168 PHY down.  VER_32/33 need BMCR_ANENABLE kept set
 * alongside the power-down bit; the middle group needs a write to PHY
 * register 0x0e first and then falls through to the common power-down.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3977
/*
 * 8168 PLL power-down.  Bail out when a DASH management agent (8168DP)
 * or ASF is active, prefer the WoL low-power path when WoL is armed,
 * otherwise power the PHY down and gate the PLL on supporting chips.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	/* VER_32/33 need this EPHY write before powering down. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4016
/* 8168 PLL power-up: ungate the PLL via PMCH, then bring the PHY up. */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
4035
/* Invoke a chip-specific operation on @tp, tolerating a missing one. */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4042
/* Power the PLL down via the chip-specific op (no-op if unset). */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4047
/* Power the PLL up via the chip-specific op (no-op if unset). */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4052
/*
 * Select the PLL power management ops: 810x-style for the fast-ethernet
 * chips, 8168-style for the gigabit 8168 family, none for the rest.
 */
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4104
/*
 * Program the baseline RxConfig value for the chip generation: the
 * early 8169/8168B chips use the FIFO threshold, the 8168C/D era adds
 * RX128_INT_EN + RX_MULTI_EN, newer chips use RX128_INT_EN alone.
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4141
4142static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4143{
4144 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
4145}
4146
/* Enable jumbo frames under the Cfg9346 unlock needed for Config writes. */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4155
/* Disable jumbo frames under the Cfg9346 unlock needed for Config writes. */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4164
/* 8168C jumbo on: set both jumbo bits and shrink the PCIe read request. */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4173
/* 8168C jumbo off: clear the jumbo bits and restore the read request size. */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4182
/* 8168DP jumbo on: only the Config3 jumbo bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
4189
/* 8168DP jumbo off: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
4196
/* 8168E jumbo on: raise the Tx packet size limit and set the jumbo bits. */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4206
/* 8168E jumbo off: restore the default Tx size limit and clear jumbo bits. */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4216
/* 8168B rev 0 jumbo on: PCIe tweak only (smaller read request, no snoop). */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4222
/* 8168B rev 0 jumbo off: restore the default PCIe read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4228
/* 8168B rev 1 jumbo on: rev 0 tweak plus the Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
4237
/* 8168B rev 1 jumbo off: rev 0 restore plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4246
/*
 * Select the jumbo-frame enable/disable pair matching the chip
 * generation; chips without jumbo support get NULL ops.
 */
static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
4298
/* Poll condition: true while the chip still reports a reset in progress. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4305
/* Issue a software chip reset and wait for the CmdReset bit to clear. */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4314
/*
 * Look up, load and validate the firmware patch for this chip.  On
 * success tp->rtl_fw points to the loaded blob; on any failure the
 * partially acquired resources are released via the goto cleanup chain
 * and tp->rtl_fw is set to NULL (driver proceeds without firmware).
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;	/* this chip needs no firmware */

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
4352
/* Load the firmware on first use; tp->rtl_fw starts out as an ERR_PTR. */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
4358
/* Stop accepting any frames by clearing the RxConfig accept bits. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4365
/* Poll condition: true while the normal-priority Tx queue is still polled. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4372
/* Poll condition: true when the Tx FIFO reports empty in TxConfig. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4379
/*
 * Quiesce and reset the chip: mask interrupts, stop the receiver, then
 * wait for the transmitter to drain using the generation-specific
 * mechanism (DP: NPQ poll; newer chips: StopReq + TXCFG_EMPTY poll;
 * others: StopReq + fixed delay) before issuing the software reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_35 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_36 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_40 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_41 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4409
/* Program the Tx DMA burst size and interframe gap into TxConfig. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4418
/* Run the chip-specific start routine, then unmask all interrupts. */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4427
/*
 * Program the DMA addresses of the Tx and Rx descriptor rings.  The
 * high halves must be written first (see the iop3xx note below).
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4441
/* Read CPlusCmd, write it back unchanged, and return the value read. */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4450
/* Set the Rx size filter just above the buffer size (see comment). */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4456
4457static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4458{
4459 static const struct rtl_cfg2_info {
4460 u32 mac_version;
4461 u32 clk;
4462 u32 val;
4463 } cfg2_info [] = {
4464 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4465 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4466 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4467 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4468 };
4469 const struct rtl_cfg2_info *p = cfg2_info;
4470 unsigned int i;
4471 u32 clk;
4472
4473 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4474 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4475 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4476 RTL_W32(0x7c, p->val);
4477 break;
4478 }
4479 }
4480}
4481
/*
 * ndo_set_rx_mode: derive the accept bits and the 64-bit multicast hash
 * filter from the device flags and multicast list, then program MAR0/4
 * and RxConfig.  Post-VER_06 chips expect the hash words swapped.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Hash each address into one of 64 filter bits. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		/* Newer chips want the two hash words byte- and word-swapped. */
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4531
/*
 * hw_start implementation for the 8169 family: program RxConfig, the
 * early-Tx threshold, CPlusCmd quirks, interrupt mitigation, descriptor
 * ring addresses and the Rx mode.  Several steps are gated on the exact
 * mac version because the early chips need a different ordering of the
 * ChipCmd enable relative to the Tx/Rx config writes.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* VER_01..04: Tx/Rx must be enabled before the config writes. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* Everything newer than VER_04: enable Tx/Rx after the ring setup. */
	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4603
4604static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4605{
4606 if (tp->csi_ops.write)
4607 tp->csi_ops.write(tp, addr, value);
4608}
4609
4610static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4611{
4612 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4613}
4614
4615static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4616{
4617 u32 csi;
4618
4619 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4620 rtl_csi_write(tp, 0x070c, csi | bits);
4621}
4622
/* CSI access enable, variant 1 (top byte 0x17 — vendor magic value). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
4627
/* CSI access enable, variant 2 (top byte 0x27 — vendor magic value). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4632
/*
 * Poll condition for CSI transfers: the CSIAR_FLAG bit in the CSI address
 * register (waited low after writes, high after reads by the accessors below).
 */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4639
/*
 * Indirect CSI write: load the data register, then issue the write command
 * through CSIAR and poll (10us x 100) until the flag drops.
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	/* Timeout is silently ignored; the write is best-effort. */
	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4650
/*
 * Indirect CSI read: issue the read command through CSIAR, then poll for the
 * flag to rise. Returns the data register, or ~0 on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4661
/*
 * CSI write for 8402/8411 chips: same protocol as r8169_csi_write() but the
 * command additionally carries CSIAR_FUNC_NIC to address the NIC function.
 */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4673
/*
 * CSI read for 8402/8411 chips (NIC-function variant of r8169_csi_read()).
 * Returns the data register, or ~0 on timeout.
 */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4684
4685static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4686{
4687 struct csi_ops *ops = &tp->csi_ops;
4688
4689 switch (tp->mac_version) {
4690 case RTL_GIGA_MAC_VER_01:
4691 case RTL_GIGA_MAC_VER_02:
4692 case RTL_GIGA_MAC_VER_03:
4693 case RTL_GIGA_MAC_VER_04:
4694 case RTL_GIGA_MAC_VER_05:
4695 case RTL_GIGA_MAC_VER_06:
4696 case RTL_GIGA_MAC_VER_10:
4697 case RTL_GIGA_MAC_VER_11:
4698 case RTL_GIGA_MAC_VER_12:
4699 case RTL_GIGA_MAC_VER_13:
4700 case RTL_GIGA_MAC_VER_14:
4701 case RTL_GIGA_MAC_VER_15:
4702 case RTL_GIGA_MAC_VER_16:
4703 case RTL_GIGA_MAC_VER_17:
4704 ops->write = NULL;
4705 ops->read = NULL;
4706 break;
4707
4708 case RTL_GIGA_MAC_VER_37:
4709 case RTL_GIGA_MAC_VER_38:
4710 ops->write = r8402_csi_write;
4711 ops->read = r8402_csi_read;
4712 break;
4713
4714 default:
4715 ops->write = r8169_csi_write;
4716 ops->read = r8169_csi_read;
4717 break;
4718 }
4719}
4720
/* One EPHY register patch entry, consumed by rtl_ephy_init(). */
struct ephy_info {
	unsigned int offset;	/* EPHY register index */
	u16 mask;		/* bits cleared before setting (rtl_ephy_init) */
	u16 bits;		/* bits set */
};
4726
4727static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4728 int len)
4729{
4730 u16 w;
4731
4732 while (len-- > 0) {
4733 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4734 rtl_ephy_write(tp, e->offset, w);
4735 e++;
4736 }
4737}
4738
4739static void rtl_disable_clock_request(struct pci_dev *pdev)
4740{
4741 int cap = pci_pcie_cap(pdev);
4742
4743 if (cap) {
4744 u16 ctl;
4745
4746 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4747 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
4748 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4749 }
4750}
4751
4752static void rtl_enable_clock_request(struct pci_dev *pdev)
4753{
4754 int cap = pci_pcie_cap(pdev);
4755
4756 if (cap) {
4757 u16 ctl;
4758
4759 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4760 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4761 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4762 }
4763}
4764
/*
 * CPlusCmd bits cleared by the 8168 hw_start helpers (debug/test/ASF
 * controls that must not stay enabled for normal operation).
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4775
/* Chip-specific init for 8168B (RTL_GIGA_MAC_VER_11). */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Strip debug/test bits from the C+ command register. */
	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4788
/* 8168B rev. e/f (VER_12/VER_17) init: 8168bb base plus TX size tweaks. */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	/* Clear Config4 bit 0 — vendor-prescribed, purpose undocumented. */
	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4799
/* Common tail of the 8168C/CP init variants. */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4815
/* 8168CP variant 1 init: EPHY patch table then common 8168cp tail. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	/* { offset, bits-to-clear, bits-to-set } — vendor-provided values. */
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0, 0x0001 },
		{ 0x02, 0x0800, 0x1000 },
		{ 0x03, 0, 0x0042 },
		{ 0x06, 0x0080, 0x0000 },
		{ 0x07, 0, 0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4832
/* 8168CP variant 2 init (no EPHY patching, no clock-request change). */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4846
/* 8168CP variant 3 init: like variant 2 plus a DBG_REG write and TX size. */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4865
/* 8168C variant 1 init: NAK fixups in DBG_REG, EPHY patches, common tail. */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800, 0x1000 },
		{ 0x03, 0, 0x0002 },
		{ 0x06, 0x0080, 0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
4883
/* 8168C variant 2 init: EPHY patches then the common 8168cp tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0, 0x0001 },
		{ 0x03, 0x0400, 0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
4897
/* 8168C variant 3 is configured identically to variant 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
4902
/* 8168C variant 4 init: no EPHY patching needed, just the common tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4909
/* 8168D init (VER_25/26/27). */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4925
/* 8168DP init (VER_31): leaner than plain 8168D, no CPlusCmd quirk mask. */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4939
4940static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4941{
4942 void __iomem *ioaddr = tp->mmio_addr;
4943 struct pci_dev *pdev = tp->pci_dev;
4944 static const struct ephy_info e_info_8168d_4[] = {
4945 { 0x0b, ~0, 0x48 },
4946 { 0x19, 0x20, 0x50 },
4947 { 0x0c, ~0, 0x20 }
4948 };
4949 int i;
4950
4951 rtl_csi_access_enable_1(tp);
4952
4953 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4954
4955 RTL_W8(MaxTxPacketSize, TxPacketMax);
4956
4957 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4958 const struct ephy_info *e = e_info_8168d_4 + i;
4959 u16 w;
4960
4961 w = rtl_ephy_read(tp, e->offset);
4962 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4963 }
4964
4965 rtl_enable_clock_request(pdev);
4966}
4967
/* 8168E variant 1 init (VER_32/33): EPHY patches, TX FIFO pointer reset. */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* Vendor-provided EPHY patch values. */
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200, 0x0100 },
		{ 0x00, 0x0000, 0x0004 },
		{ 0x06, 0x0002, 0x0001 },
		{ 0x06, 0x0000, 0x0030 },
		{ 0x07, 0x0000, 0x2000 },
		{ 0x00, 0x0000, 0x0020 },
		{ 0x03, 0x5800, 0x2000 },
		{ 0x03, 0x0000, 0x0001 },
		{ 0x01, 0x0800, 0x1000 },
		{ 0x07, 0x0000, 0x4000 },
		{ 0x1e, 0x0000, 0x2000 },
		{ 0x19, 0xffff, 0xfe6c },
		{ 0x0a, 0x0000, 0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5004
/* 8168E variant 2 init (VER_34): EPHY patches plus ERI (EXGMAC) tuning. */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000, 0x0080 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI register setup — vendor-provided magic values. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5043
/* Common 8168F init, shared by the 8168f_1 and 8411 variants below. */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI register setup — vendor-provided magic values. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5074
/* 8168F variant 1 init (VER_35/36): common 8168F base plus EPHY patches. */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0, 0x0020 },
		{ 0x08, 0x0001, 0x0002 },
		{ 0x09, 0x0000, 0x0080 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5094
/* 8411 init (VER_38): common 8168F base with its own EPHY patch set. */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0, 0x0020 },
		{ 0x0f, 0xffff, 0x5200 },
		{ 0x1e, 0x0000, 0x4000 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	/* Note: different 0x0d4 clear-mask (0x0000) than the 8168f_1 path. */
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
5110
/* 8168G variant 1 init (VER_40/41). */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* ERI register setup — vendor-provided magic values. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
5140
/*
 * Bring up an 8168-family chip: common register init, then dispatch to the
 * per-revision helper, then enable TX/RX. Runs with Cfg9346 unlocked.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* NOTE(review): |= of a live register read looks odd — it also folds
	 * whatever CPlusCmd currently contains into the cached cp_cmd. */
	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* Read flushes posted writes before chip-specific setup. */
	RTL_R8(IntrMask);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5259
/*
 * CPlusCmd bits cleared during 810x (8101/8102/8105/...) bring-up — same
 * debug/test/ASF set as R8168_CPCMD_QUIRK_MASK.
 */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5270
/* 8102E variant 1 init (VER_07): NAK fix, config regs, EPHY patches. */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01, 0, 0x6e65 },
		{ 0x02, 0, 0x091f },
		{ 0x03, 0, 0xc2f9 },
		{ 0x06, 0, 0xafb5 },
		{ 0x07, 0, 0x0e00 },
		{ 0x19, 0, 0xec80 },
		{ 0x01, 0, 0x2e65 },
		{ 0x01, 0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck, drop LEDS0 — vendor-prescribed quirk. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5303
/* 8102E variant 2 init (VER_09): minimal config register setup. */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
5316
/* 8102E variant 3 init (VER_08): variant 2 plus one EPHY write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5323
/* 8105E variant 1 init (VER_29): ASPM workaround, MCU setup, EPHY patches. */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07, 0, 0x4000 },
		{ 0x19, 0, 0x0200 },
		{ 0x19, 0, 0x0020 },
		{ 0x1e, 0, 0x2000 },
		{ 0x03, 0, 0x0001 },
		{ 0x19, 0, 0x0100 },
		{ 0x19, 0, 0x0004 },
		{ 0x0a, 0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5349
/* 8105E variant 2 init (VER_30): variant 1 plus bit 15 in EPHY reg 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5355
/* 8402 init (VER_37): ASPM workaround, EPHY patches, ERI tuning. */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19, 0xffff, 0xff64 },
		{ 0x1e, 0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI register setup — vendor-provided magic values. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5384
/* 8106 init (VER_39). */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5396
/*
 * Bring up an 810x-family chip: per-revision setup under the Cfg9346
 * unlock, then the common register init and TX/RX enable.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer 810x chips do not signal RX FIFO overflow reliably. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap) {
			/* Enable PCIe no-snoop for these revisions. */
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes the posted writes above. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5469
5470static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5471{
5472 struct rtl8169_private *tp = netdev_priv(dev);
5473
5474 if (new_mtu < ETH_ZLEN ||
5475 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5476 return -EINVAL;
5477
5478 if (new_mtu > ETH_DATA_LEN)
5479 rtl_hw_jumbo_enable(tp);
5480 else
5481 rtl_hw_jumbo_disable(tp);
5482
5483 dev->mtu = new_mtu;
5484 netdev_update_features(dev);
5485
5486 return 0;
5487}
5488
/*
 * Park an RX descriptor: poison its address and drop DescOwn so the NIC
 * will never DMA into it again.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5494
/*
 * Unmap and free one RX data buffer, clear its slot and park the
 * corresponding descriptor.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5505
5506static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5507{
5508 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5509
5510 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
5511}
5512
/*
 * Install a DMA mapping into an RX descriptor and hand it to the NIC.
 * The wmb() guarantees the address is visible before DescOwn is set.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5520
/* Round a buffer pointer up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	return (void *)ALIGN((long)data, 16);
}
5525
/*
 * Allocate, 16-byte-align and DMA-map one RX buffer, then install it into
 * @desc. Returns the buffer (to be stored in Rx_databuff) or NULL.
 *
 * NOTE(review): despite the sk_buff* return type, this returns the raw
 * kmalloc'ed buffer pointer; callers store it in a void * slot.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	/* Allocate on the NUMA node of the device when known. */
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* If not naturally 16-byte aligned, reallocate with slack. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5561
5562static void rtl8169_rx_clear(struct rtl8169_private *tp)
5563{
5564 unsigned int i;
5565
5566 for (i = 0; i < NUM_RX_DESC; i++) {
5567 if (tp->Rx_databuff[i]) {
5568 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5569 tp->RxDescArray + i);
5570 }
5571 }
5572}
5573
/* Set RingEnd on @desc so the NIC wraps back to the first descriptor. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5578
/*
 * Populate every empty RX ring slot with a mapped buffer and close the
 * ring with a RingEnd marker. On allocation failure, everything already
 * allocated is released. Returns 0 or -ENOMEM.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already populated — keep it. */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
5604
/*
 * Reset ring indexes, clear the TX/RX bookkeeping arrays and fill the RX
 * ring with fresh buffers. Returns 0 or -ENOMEM.
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
5616
5617static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5618 struct TxDesc *desc)
5619{
5620 unsigned int len = tx_skb->len;
5621
5622 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
5623
5624 desc->opts1 = 0x00;
5625 desc->opts2 = 0x00;
5626 desc->addr = 0x00;
5627 tx_skb->len = 0;
5628}
5629
/*
 * Release @n TX ring entries starting at index @start (modulo ring size):
 * unmap each used slot and free its skb, counting it as dropped.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			/* Only the slot holding the skb pointer frees it;
			 * fragment slots have tx_skb->skb == NULL. */
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5653
/* Drop the whole TX ring, reset its indexes and the BQL queue state. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
	netdev_reset_queue(tp->dev);
}
5660
/*
 * Full datapath reset (task context): quiesce NAPI and the TX queue,
 * reset the chip, recycle the rings, then restart everything.
 * The ordering here is deliberate — do not reorder.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for in-flight softirq handlers before touching the rings. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Hand all RX descriptors (buffers are kept) back to the NIC. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5683
/* ndo_tx_timeout hook: defer a full datapath reset to the work queue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5690
/*
 * Map the paged fragments of @skb into consecutive TX descriptors after
 * tp->cur_tx. Returns the number of fragment descriptors used, or -EIO on
 * a DMA mapping failure (already-mapped fragments are rolled back).
 * The skb pointer is parked on the LAST fragment's slot.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5741
5742static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5743 struct sk_buff *skb, u32 *opts)
5744{
5745 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5746 u32 mss = skb_shinfo(skb)->gso_size;
5747 int offset = info->opts_offset;
5748
5749 if (mss) {
5750 opts[0] |= TD_LSO;
5751 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5752 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5753 const struct iphdr *ip = ip_hdr(skb);
5754
5755 if (ip->protocol == IPPROTO_TCP)
5756 opts[offset] |= info->checksum.tcp;
5757 else if (ip->protocol == IPPROTO_UDP)
5758 opts[offset] |= info->checksum.udp;
5759 else
5760 WARN_ON_ONCE(1);
5761 }
5762}
5763
/*
 * ndo_start_xmit handler.  Maps the linear part of @skb, lets
 * rtl8169_xmit_frags() queue the paged fragments, and publishes the
 * first descriptor (with DescOwn) last so the chip never sees a
 * partially built chain.  Lockless: the stack serializes per queue,
 * and memory barriers synchronize with rtl_tx() completion.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	/* Paranoia: ring accounting says this descriptor must be ours. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		/* Single-descriptor frame: free the skb on its completion. */
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	/* Descriptor contents must be visible before DescOwn is set. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	/* Order the ring/index updates before kicking the chip. */
	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5863
/*
 * Handle a SYSErr event: dump the PCI command/status registers, run an
 * empirical recovery sequence (see comment below), disable PCI DAC if
 * the error hit before any Rx traffic, then reset and schedule a full
 * reinit from the workqueue.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Clear (write-one-to-clear) the latched PCI error bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5910
/* Per-invocation Tx completion tally used by rtl_tx(). */
struct rtl_txc {
	int packets;	/* completed skbs (LastFrag descriptors seen) */
	int bytes;	/* total skb->len of those skbs */
};
5915
/*
 * Reclaim completed Tx descriptors: unmap buffers, free each skb on
 * its LastFrag descriptor, account packets/bytes and advance dirty_tx.
 * Runs from NAPI context and synchronizes with rtl8169_start_xmit()
 * through memory barriers only (lockless ring).
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	struct rtl8169_stats *tx_stats = &tp->tx_stats;
	unsigned int dirty_tx, tx_left;
	struct rtl_txc txc = { 0, 0 };

	dirty_tx = tp->dirty_tx;
	/* Read cur_tx after dirty_tx (pairs with the xmit barriers). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		/* Read DescOwn before trusting the rest of the descriptor. */
		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			struct sk_buff *skb = tx_skb->skb;

			txc.packets++;
			txc.bytes += skb->len;
			dev_kfree_skb(skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets += txc.packets;
	tx_stats->bytes += txc.bytes;
	u64_stats_update_end(&tx_stats->syncp);

	netdev_completed_queue(dev, txc.packets, txc.bytes);

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5984
5985static inline int rtl8169_fragmented_frame(u32 status)
5986{
5987 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5988}
5989
5990static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5991{
5992 u32 status = opts1 & RxProtoMask;
5993
5994 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5995 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5996 skb->ip_summed = CHECKSUM_UNNECESSARY;
5997 else
5998 skb_checksum_none_assert(skb);
5999}
6000
/*
 * Copy-break receive: copy @pkt_size bytes out of the still-mapped DMA
 * buffer @data into a freshly allocated skb.  The buffer is synced to
 * the CPU for the copy and handed back to the device afterwards, so
 * the caller can recycle it immediately.  Returns NULL when skb
 * allocation fails (caller drops the packet).
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
6019
/*
 * Receive up to @budget frames from the Rx ring.  Error frames bump
 * the error counters (runt/CRC frames may still be delivered when
 * NETIF_F_RXALL is set); good frames are copied out of the DMA buffer,
 * checksummed, VLAN-tagged and handed to GRO.  Returns the number of
 * descriptors processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read DescOwn before trusting the rest of the descriptor. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow requires a chip reset. */
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* RXALL: deliver runt/CRC-damaged frames anyway. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			/* Copy out, then hand the buffer straight back. */
			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD plateform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	tp->dirty_rx += count;

	return count;
}
6118
6119static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6120{
6121 struct net_device *dev = dev_instance;
6122 struct rtl8169_private *tp = netdev_priv(dev);
6123 int handled = 0;
6124 u16 status;
6125
6126 status = rtl_get_events(tp);
6127 if (status && status != 0xffff) {
6128 status &= RTL_EVENT_NAPI | tp->event_slow;
6129 if (status) {
6130 handled = 1;
6131
6132 rtl_irq_disable(tp);
6133 napi_schedule(&tp->napi);
6134 }
6135 }
6136 return IRQ_RETVAL(handled);
6137}
6138
/*
 * Workqueue context.
 */
/* Service the "slow" interrupt events (Rx FIFO overflow, PCI system
 * error, link change) deferred from rtl8169_poll(), then restore the
 * full interrupt mask.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	/* All slow events handled: unmask everything again. */
	rtl_irq_enable_all(tp);
}
6170
6171static void rtl_task(struct work_struct *work)
6172{
6173 static const struct {
6174 int bitnr;
6175 void (*action)(struct rtl8169_private *);
6176 } rtl_work[] = {
6177 /* XXX - keep rtl_slow_event_work() as first element. */
6178 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6179 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6180 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6181 };
6182 struct rtl8169_private *tp =
6183 container_of(work, struct rtl8169_private, wk.work);
6184 struct net_device *dev = tp->dev;
6185 int i;
6186
6187 rtl_lock_work(tp);
6188
6189 if (!netif_running(dev) ||
6190 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6191 goto out_unlock;
6192
6193 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6194 bool pending;
6195
6196 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6197 if (pending)
6198 rtl_work[i].action(tp);
6199 }
6200
6201out_unlock:
6202 rtl_unlock_work(tp);
6203}
6204
/*
 * NAPI poll handler: ack and service the fast (Rx/Tx) events inline,
 * defer slow events to the workqueue (keeping them masked until the
 * work has run), and re-enable interrupts once the budget is not
 * exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Slow events are acked by rtl_slow_event_work(), not here. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		/* Keep the slow events masked until the work has run. */
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
6237
6238static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6239{
6240 struct rtl8169_private *tp = netdev_priv(dev);
6241
6242 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6243 return;
6244
6245 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6246 RTL_W32(RxMissed, 0);
6247}
6248
/*
 * Bring the interface fully down: stop the timer, NAPI and the Tx
 * queue, reset the chip and drain both descriptor rings.  Called from
 * rtl8169_close() under the work mutex.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6276
/*
 * ndo_stop handler: snapshot the hardware counters, tear the device
 * down (with deferred tasks disabled), release the IRQ and free the
 * descriptor rings.  Mirrors the setup done in rtl_open().
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Keep rtl_task() from touching the hardware from now on. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6306
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt': invoked by netconsole and friends when normal
 * interrupt delivery is unavailable.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_interrupt(pdev->irq, dev);
}
#endif
6315
/*
 * ndo_open handler: allocate and populate the descriptor rings, load
 * firmware, request the IRQ, program PHY and chip, then start the Tx
 * queue.  Error paths unwind in reverse order via the goto ladder.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	/* NOTE(review): presumably publishes ring/work init before the IRQ
	 * handler can run — confirm intent.
	 */
	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6395
/*
 * ndo_get_stats64: combine the u64_stats-protected Rx/Tx soft counters
 * with the error counters kept in dev->stats.  The fetch/retry loops
 * guard against concurrent updates from the hot path.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	/* Fold in the hardware RxMissed counter while the chip is live. */
	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
6429
/*
 * Common suspend path: detach from the stack, stop the queue and NAPI,
 * disable deferred tasks and power the PLL down.  No-op when the
 * interface is already down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6447
6448#ifdef CONFIG_PM
6449
/* System sleep (suspend/freeze/poweroff) callback: run the common
 * suspend path.  Never fails.
 */
static int rtl8169_suspend(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_net_suspend(dev);

	return 0;
}
6459
/*
 * Common resume path: re-attach to the stack, power the PLL up,
 * restart NAPI and deferred tasks, then schedule a reset to reinit
 * the chip from the workqueue.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6475
/* System resume callback: reprogram the PHY (its setup is lost across
 * suspend) and, when the interface is up, run the common resume path.
 */
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_init_phy(dev, netdev_priv(dev));

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6489
/*
 * Runtime suspend: save the current WoL options, arm wake-on-anything
 * so the chip can bring us back, then run the common suspend path.
 * No-op while the interface is down (no rings allocated).
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
6508
/*
 * Runtime resume: restore the WoL options saved at runtime suspend,
 * reprogram the PHY and run the common resume path.  No-op while the
 * interface is down.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6529
6530static int rtl8169_runtime_idle(struct device *device)
6531{
6532 struct pci_dev *pdev = to_pci_dev(device);
6533 struct net_device *dev = pci_get_drvdata(pdev);
6534 struct rtl8169_private *tp = netdev_priv(dev);
6535
6536 return tp->TxDescArray ? -EBUSY : 0;
6537}
6538
/* Dispatch table for system sleep and runtime PM transitions. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};

#define RTL8169_PM_OPS	(&rtl8169_pm_ops)

#else /* !CONFIG_PM */

#define RTL8169_PM_OPS	NULL

#endif /* !CONFIG_PM */
6558
/* Chip-specific tweak applied at shutdown when Wake-on-LAN is armed. */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		/* Keep only the receiver running so WoL frames are seen. */
		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6578
/*
 * PCI shutdown handler: quiesce the device, restore the permanent MAC
 * address and, on power-off with WoL armed, apply the WoL quirks and
 * drop the chip into D3hot with wake enabled.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6606
/*
 * PCI remove handler: flush deferred work, unregister the netdevice
 * and release every resource acquired in rtl_init_one(), in reverse
 * order.
 */
static void __devexit rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* NOTE(review): these versions appear to need a firmware handshake
	 * on unload — see rtl8168_driver_stop().
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	cancel_work_sync(&tp->wk.work);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
6636
/* net_device_ops shared by every supported chip variant. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6655
/* Per-family probe configuration, indexed by the RTL_CFG_* value in
 * the PCI device table's driver_data.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);	/* family hw_start hook */
	unsigned int region;			/* PCI BAR with the registers */
	unsigned int align;			/* Rx buffer alignment */
	u16 event_slow;				/* events deferred to workqueue */
	unsigned features;			/* RTL_FEATURE_* flags */
	u8 default_ver;				/* fallback mac_version */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6690
/* Cfg9346_Unlock assumed. */
/* Try to enable MSI when the configuration allows it.  Returns
 * RTL_FEATURE_MSI on success, 0 when falling back to INTx.  Config2
 * is only written back on the old 8169 family (mac_version <= 06).
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6712
/* Poll condition: LINK_LIST_RDY set in the MCU register. */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6719
/* Poll condition: all RXTX_EMPTY bits set in the MCU register. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6726
/*
 * One-time probe init for the 8168G family (mac versions 40/41): gate
 * Rx, wait for the chip to drain, stop Tx/Rx, take the MCU out of OOB
 * mode and wait for the link list to become ready.  Every wait bails
 * out silently on timeout, leaving the chip in its current state.
 */
static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* NOTE(review): bits 14/15 of mac-ocp reg 0xe8de look like an OOB
	 * handshake — exact semantics undocumented here, confirm against
	 * Realtek reference code.
	 */
	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
6760
6761static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
6762{
6763 switch (tp->mac_version) {
6764 case RTL_GIGA_MAC_VER_40:
6765 case RTL_GIGA_MAC_VER_41:
6766 rtl_hw_init_8168g(tp);
6767 break;
6768
6769 default:
6770 break;
6771 }
6772}
6773
6774static int __devinit
6775rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6776{
6777 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6778 const unsigned int region = cfg->region;
6779 struct rtl8169_private *tp;
6780 struct mii_if_info *mii;
6781 struct net_device *dev;
6782 void __iomem *ioaddr;
6783 int chipset, i;
6784 int rc;
6785
6786 if (netif_msg_drv(&debug)) {
6787 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6788 MODULENAME, RTL8169_VERSION);
6789 }
6790
6791 dev = alloc_etherdev(sizeof (*tp));
6792 if (!dev) {
6793 rc = -ENOMEM;
6794 goto out;
6795 }
6796
6797 SET_NETDEV_DEV(dev, &pdev->dev);
6798 dev->netdev_ops = &rtl_netdev_ops;
6799 tp = netdev_priv(dev);
6800 tp->dev = dev;
6801 tp->pci_dev = pdev;
6802 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6803
6804 mii = &tp->mii;
6805 mii->dev = dev;
6806 mii->mdio_read = rtl_mdio_read;
6807 mii->mdio_write = rtl_mdio_write;
6808 mii->phy_id_mask = 0x1f;
6809 mii->reg_num_mask = 0x1f;
6810 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6811
6812 /* disable ASPM completely as that cause random device stop working
6813 * problems as well as full system hangs for some PCIe devices users */
6814 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6815 PCIE_LINK_STATE_CLKPM);
6816
6817 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6818 rc = pci_enable_device(pdev);
6819 if (rc < 0) {
6820 netif_err(tp, probe, dev, "enable failure\n");
6821 goto err_out_free_dev_1;
6822 }
6823
6824 if (pci_set_mwi(pdev) < 0)
6825 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6826
6827 /* make sure PCI base addr 1 is MMIO */
6828 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6829 netif_err(tp, probe, dev,
6830 "region #%d not an MMIO resource, aborting\n",
6831 region);
6832 rc = -ENODEV;
6833 goto err_out_mwi_2;
6834 }
6835
6836 /* check for weird/broken PCI region reporting */
6837 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6838 netif_err(tp, probe, dev,
6839 "Invalid PCI region size(s), aborting\n");
6840 rc = -ENODEV;
6841 goto err_out_mwi_2;
6842 }
6843
6844 rc = pci_request_regions(pdev, MODULENAME);
6845 if (rc < 0) {
6846 netif_err(tp, probe, dev, "could not request regions\n");
6847 goto err_out_mwi_2;
6848 }
6849
6850 tp->cp_cmd = RxChkSum;
6851
6852 if ((sizeof(dma_addr_t) > 4) &&
6853 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6854 tp->cp_cmd |= PCIDAC;
6855 dev->features |= NETIF_F_HIGHDMA;
6856 } else {
6857 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6858 if (rc < 0) {
6859 netif_err(tp, probe, dev, "DMA configuration failed\n");
6860 goto err_out_free_res_3;
6861 }
6862 }
6863
6864 /* ioremap MMIO region */
6865 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6866 if (!ioaddr) {
6867 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6868 rc = -EIO;
6869 goto err_out_free_res_3;
6870 }
6871 tp->mmio_addr = ioaddr;
6872
6873 if (!pci_is_pcie(pdev))
6874 netif_info(tp, probe, dev, "not PCI Express\n");
6875
6876 /* Identify chip attached to board */
6877 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6878
6879 rtl_init_rxcfg(tp);
6880
6881 rtl_irq_disable(tp);
6882
6883 rtl_hw_initialize(tp);
6884
6885 rtl_hw_reset(tp);
6886
6887 rtl_ack_events(tp, 0xffff);
6888
6889 pci_set_master(pdev);
6890
6891 /*
6892 * Pretend we are using VLANs; This bypasses a nasty bug where
6893 * Interrupts stop flowing on high load on 8110SCd controllers.
6894 */
6895 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6896 tp->cp_cmd |= RxVlan;
6897
6898 rtl_init_mdio_ops(tp);
6899 rtl_init_pll_power_ops(tp);
6900 rtl_init_jumbo_ops(tp);
6901 rtl_init_csi_ops(tp);
6902
6903 rtl8169_print_mac_version(tp);
6904
6905 chipset = tp->mac_version;
6906 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6907
6908 RTL_W8(Cfg9346, Cfg9346_Unlock);
6909 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6910 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6911 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6912 tp->features |= RTL_FEATURE_WOL;
6913 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6914 tp->features |= RTL_FEATURE_WOL;
6915 tp->features |= rtl_try_msi(tp, cfg);
6916 RTL_W8(Cfg9346, Cfg9346_Lock);
6917
6918 if (rtl_tbi_enabled(tp)) {
6919 tp->set_speed = rtl8169_set_speed_tbi;
6920 tp->get_settings = rtl8169_gset_tbi;
6921 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6922 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6923 tp->link_ok = rtl8169_tbi_link_ok;
6924 tp->do_ioctl = rtl_tbi_ioctl;
6925 } else {
6926 tp->set_speed = rtl8169_set_speed_xmii;
6927 tp->get_settings = rtl8169_gset_xmii;
6928 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6929 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6930 tp->link_ok = rtl8169_xmii_link_ok;
6931 tp->do_ioctl = rtl_xmii_ioctl;
6932 }
6933
6934 mutex_init(&tp->wk.mutex);
6935
6936 /* Get MAC address */
6937 for (i = 0; i < ETH_ALEN; i++)
6938 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6939 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6940
6941 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6942 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6943
6944 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6945
6946 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6947 * properly for all devices */
6948 dev->features |= NETIF_F_RXCSUM |
6949 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6950
6951 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6952 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6953 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6954 NETIF_F_HIGHDMA;
6955
6956 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6957 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6958 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6959
6960 dev->hw_features |= NETIF_F_RXALL;
6961 dev->hw_features |= NETIF_F_RXFCS;
6962
6963 tp->hw_start = cfg->hw_start;
6964 tp->event_slow = cfg->event_slow;
6965
6966 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6967 ~(RxBOVF | RxFOVF) : ~0;
6968
6969 init_timer(&tp->timer);
6970 tp->timer.data = (unsigned long) dev;
6971 tp->timer.function = rtl8169_phy_timer;
6972
6973 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6974
6975 rc = register_netdev(dev);
6976 if (rc < 0)
6977 goto err_out_msi_4;
6978
6979 pci_set_drvdata(pdev, dev);
6980
6981 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6982 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6983 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6984 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6985 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6986 "tx checksumming: %s]\n",
6987 rtl_chip_infos[chipset].jumbo_max,
6988 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6989 }
6990
6991 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6992 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6993 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6994 rtl8168_driver_start(tp);
6995 }
6996
6997 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6998
6999 if (pci_dev_run_wake(pdev))
7000 pm_runtime_put_noidle(&pdev->dev);
7001
7002 netif_carrier_off(dev);
7003
7004out:
7005 return rc;
7006
7007err_out_msi_4:
7008 netif_napi_del(&tp->napi);
7009 rtl_disable_msi(pdev, tp);
7010 iounmap(ioaddr);
7011err_out_free_res_3:
7012 pci_release_regions(pdev);
7013err_out_mwi_2:
7014 pci_clear_mwi(pdev);
7015 pci_disable_device(pdev);
7016err_out_free_dev_1:
7017 free_netdev(dev);
7018 goto out;
7019}
7020
/*
 * PCI driver descriptor handed to the PCI core at module load.
 * The core matches devices against @id_table and calls @probe
 * (rtl_init_one) for each supported NIC it finds.
 */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,	/* supported vendor/device IDs */
	.probe		= rtl_init_one,		/* per-device setup */
	/* __devexit_p() compiles to NULL when hotplug removal is configured out */
	.remove		= __devexit_p(rtl_remove_one),
	.shutdown	= rtl_shutdown,		/* quiesce HW on reboot/poweroff */
	.driver.pm	= RTL8169_PM_OPS,	/* suspend/resume + runtime PM hooks */
};
7029
/*
 * Module entry point: register the driver with the PCI core.
 * Returns 0 on success or a negative errno from pci_register_driver().
 */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
7034
/*
 * Module exit point: unregister from the PCI core, which invokes the
 * remove callback for every bound device before returning.
 */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}
7039
/* Wire the above entry/exit points into the module load/unload sequence. */
module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);
This page took 0.048202 seconds and 5 git commands to generate.