Commit | Line | Data |
---|---|---|
b7370112 | 1 | /* |
2 | * drivers/net/ethernet/nxp/lpc_eth.c | |
3 | * | |
4 | * Author: Kevin Wells <kevin.wells@nxp.com> | |
5 | * | |
6 | * Copyright (C) 2010 NXP Semiconductors | |
7 | * Copyright (C) 2012 Roland Stigge <stigge@antcom.de> | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License as published by | |
11 | * the Free Software Foundation; either version 2 of the License, or | |
12 | * (at your option) any later version. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | */ | |
19 | ||
20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
21 | ||
22 | #include <linux/init.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/kernel.h> | |
25 | #include <linux/sched.h> | |
26 | #include <linux/slab.h> | |
27 | #include <linux/delay.h> | |
28 | #include <linux/interrupt.h> | |
29 | #include <linux/errno.h> | |
30 | #include <linux/ioport.h> | |
31 | #include <linux/crc32.h> | |
32 | #include <linux/platform_device.h> | |
33 | #include <linux/spinlock.h> | |
34 | #include <linux/ethtool.h> | |
35 | #include <linux/mii.h> | |
36 | #include <linux/clk.h> | |
37 | #include <linux/workqueue.h> | |
38 | #include <linux/netdevice.h> | |
39 | #include <linux/etherdevice.h> | |
40 | #include <linux/skbuff.h> | |
41 | #include <linux/phy.h> | |
42 | #include <linux/dma-mapping.h> | |
4de02e4a | 43 | #include <linux/of.h> |
b7370112 | 44 | #include <linux/of_net.h> |
45 | #include <linux/types.h> | |
46 | ||
47 | #include <linux/delay.h> | |
48 | #include <linux/io.h> | |
49 | #include <mach/board.h> | |
50 | #include <mach/platform.h> | |
51 | #include <mach/hardware.h> | |
52 | ||
53 | #define MODNAME "lpc-eth" | |
54 | #define DRV_VERSION "1.00" | |
55 | #define PHYDEF_ADDR 0x00 | |
56 | ||
57 | #define ENET_MAXF_SIZE 1536 | |
58 | #define ENET_RX_DESC 48 | |
59 | #define ENET_TX_DESC 16 | |
60 | ||
61 | #define NAPI_WEIGHT 16 | |
62 | ||
63 | /* | |
64 | * Ethernet MAC controller Register offsets | |
65 | */ | |
66 | #define LPC_ENET_MAC1(x) (x + 0x000) | |
67 | #define LPC_ENET_MAC2(x) (x + 0x004) | |
68 | #define LPC_ENET_IPGT(x) (x + 0x008) | |
69 | #define LPC_ENET_IPGR(x) (x + 0x00C) | |
70 | #define LPC_ENET_CLRT(x) (x + 0x010) | |
71 | #define LPC_ENET_MAXF(x) (x + 0x014) | |
72 | #define LPC_ENET_SUPP(x) (x + 0x018) | |
73 | #define LPC_ENET_TEST(x) (x + 0x01C) | |
74 | #define LPC_ENET_MCFG(x) (x + 0x020) | |
75 | #define LPC_ENET_MCMD(x) (x + 0x024) | |
76 | #define LPC_ENET_MADR(x) (x + 0x028) | |
77 | #define LPC_ENET_MWTD(x) (x + 0x02C) | |
78 | #define LPC_ENET_MRDD(x) (x + 0x030) | |
79 | #define LPC_ENET_MIND(x) (x + 0x034) | |
80 | #define LPC_ENET_SA0(x) (x + 0x040) | |
81 | #define LPC_ENET_SA1(x) (x + 0x044) | |
82 | #define LPC_ENET_SA2(x) (x + 0x048) | |
83 | #define LPC_ENET_COMMAND(x) (x + 0x100) | |
84 | #define LPC_ENET_STATUS(x) (x + 0x104) | |
85 | #define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108) | |
86 | #define LPC_ENET_RXSTATUS(x) (x + 0x10C) | |
87 | #define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110) | |
88 | #define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114) | |
89 | #define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118) | |
90 | #define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C) | |
91 | #define LPC_ENET_TXSTATUS(x) (x + 0x120) | |
92 | #define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124) | |
93 | #define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128) | |
94 | #define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C) | |
95 | #define LPC_ENET_TSV0(x) (x + 0x158) | |
96 | #define LPC_ENET_TSV1(x) (x + 0x15C) | |
97 | #define LPC_ENET_RSV(x) (x + 0x160) | |
98 | #define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170) | |
99 | #define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174) | |
100 | #define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200) | |
101 | #define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204) | |
102 | #define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208) | |
103 | #define LPC_ENET_HASHFILTERL(x) (x + 0x210) | |
104 | #define LPC_ENET_HASHFILTERH(x) (x + 0x214) | |
105 | #define LPC_ENET_INTSTATUS(x) (x + 0xFE0) | |
106 | #define LPC_ENET_INTENABLE(x) (x + 0xFE4) | |
107 | #define LPC_ENET_INTCLEAR(x) (x + 0xFE8) | |
108 | #define LPC_ENET_INTSET(x) (x + 0xFEC) | |
109 | #define LPC_ENET_POWERDOWN(x) (x + 0xFF4) | |
110 | ||
111 | /* | |
112 | * mac1 register definitions | |
113 | */ | |
114 | #define LPC_MAC1_RECV_ENABLE (1 << 0) | |
115 | #define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1) | |
116 | #define LPC_MAC1_RX_FLOW_CONTROL (1 << 2) | |
117 | #define LPC_MAC1_TX_FLOW_CONTROL (1 << 3) | |
118 | #define LPC_MAC1_LOOPBACK (1 << 4) | |
119 | #define LPC_MAC1_RESET_TX (1 << 8) | |
120 | #define LPC_MAC1_RESET_MCS_TX (1 << 9) | |
121 | #define LPC_MAC1_RESET_RX (1 << 10) | |
122 | #define LPC_MAC1_RESET_MCS_RX (1 << 11) | |
123 | #define LPC_MAC1_SIMULATION_RESET (1 << 14) | |
124 | #define LPC_MAC1_SOFT_RESET (1 << 15) | |
125 | ||
126 | /* | |
127 | * mac2 register definitions | |
128 | */ | |
129 | #define LPC_MAC2_FULL_DUPLEX (1 << 0) | |
130 | #define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1) | |
131 | #define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2) | |
132 | #define LPC_MAC2_DELAYED_CRC (1 << 3) | |
133 | #define LPC_MAC2_CRC_ENABLE (1 << 4) | |
134 | #define LPC_MAC2_PAD_CRC_ENABLE (1 << 5) | |
135 | #define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6) | |
136 | #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7) | |
137 | #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8) | |
138 | #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9) | |
139 | #define LPC_MAC2_NO_BACKOFF (1 << 12) | |
140 | #define LPC_MAC2_BACK_PRESSURE (1 << 13) | |
141 | #define LPC_MAC2_EXCESS_DEFER (1 << 14) | |
142 | ||
143 | /* | |
144 | * ipgt register definitions | |
145 | */ | |
146 | #define LPC_IPGT_LOAD(n) ((n) & 0x7F) | |
147 | ||
148 | /* | |
149 | * ipgr register definitions | |
150 | */ | |
151 | #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F) | |
152 | #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8) | |
153 | ||
154 | /* | |
155 | * clrt register definitions | |
156 | */ | |
157 | #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF) | |
158 | #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8) | |
159 | ||
160 | /* | |
161 | * maxf register definitions | |
162 | */ | |
163 | #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF) | |
164 | ||
165 | /* | |
166 | * supp register definitions | |
167 | */ | |
168 | #define LPC_SUPP_SPEED (1 << 8) | |
169 | #define LPC_SUPP_RESET_RMII (1 << 11) | |
170 | ||
171 | /* | |
172 | * test register definitions | |
173 | */ | |
174 | #define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0) | |
175 | #define LPC_TEST_PAUSE (1 << 1) | |
176 | #define LPC_TEST_BACKPRESSURE (1 << 2) | |
177 | ||
178 | /* | |
179 | * mcfg register definitions | |
180 | */ | |
181 | #define LPC_MCFG_SCAN_INCREMENT (1 << 0) | |
182 | #define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1) | |
183 | #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2) | |
184 | #define LPC_MCFG_CLOCK_HOST_DIV_4 0 | |
185 | #define LPC_MCFG_CLOCK_HOST_DIV_6 2 | |
186 | #define LPC_MCFG_CLOCK_HOST_DIV_8 3 | |
187 | #define LPC_MCFG_CLOCK_HOST_DIV_10 4 | |
188 | #define LPC_MCFG_CLOCK_HOST_DIV_14 5 | |
189 | #define LPC_MCFG_CLOCK_HOST_DIV_20 6 | |
190 | #define LPC_MCFG_CLOCK_HOST_DIV_28 7 | |
191 | #define LPC_MCFG_RESET_MII_MGMT (1 << 15) | |
192 | ||
193 | /* | |
194 | * mcmd register definitions | |
195 | */ | |
196 | #define LPC_MCMD_READ (1 << 0) | |
197 | #define LPC_MCMD_SCAN (1 << 1) | |
198 | ||
199 | /* | |
200 | * madr register definitions | |
201 | */ | |
202 | #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F) | |
203 | #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8) | |
204 | ||
205 | /* | |
206 | * mwtd register definitions | |
207 | */ | |
208 | #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF) | |
209 | ||
210 | /* | |
211 | * mrdd register definitions | |
212 | */ | |
213 | #define LPC_MRDD_READ_MASK 0xFFFF | |
214 | ||
215 | /* | |
216 | * mind register definitions | |
217 | */ | |
218 | #define LPC_MIND_BUSY (1 << 0) | |
219 | #define LPC_MIND_SCANNING (1 << 1) | |
220 | #define LPC_MIND_NOT_VALID (1 << 2) | |
221 | #define LPC_MIND_MII_LINK_FAIL (1 << 3) | |
222 | ||
223 | /* | |
224 | * command register definitions | |
225 | */ | |
226 | #define LPC_COMMAND_RXENABLE (1 << 0) | |
227 | #define LPC_COMMAND_TXENABLE (1 << 1) | |
228 | #define LPC_COMMAND_REG_RESET (1 << 3) | |
229 | #define LPC_COMMAND_TXRESET (1 << 4) | |
230 | #define LPC_COMMAND_RXRESET (1 << 5) | |
231 | #define LPC_COMMAND_PASSRUNTFRAME (1 << 6) | |
232 | #define LPC_COMMAND_PASSRXFILTER (1 << 7) | |
233 | #define LPC_COMMAND_TXFLOWCONTROL (1 << 8) | |
234 | #define LPC_COMMAND_RMII (1 << 9) | |
235 | #define LPC_COMMAND_FULLDUPLEX (1 << 10) | |
236 | ||
237 | /* | |
238 | * status register definitions | |
239 | */ | |
240 | #define LPC_STATUS_RXACTIVE (1 << 0) | |
241 | #define LPC_STATUS_TXACTIVE (1 << 1) | |
242 | ||
243 | /* | |
244 | * tsv0 register definitions | |
245 | */ | |
246 | #define LPC_TSV0_CRC_ERROR (1 << 0) | |
247 | #define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1) | |
248 | #define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2) | |
249 | #define LPC_TSV0_DONE (1 << 3) | |
250 | #define LPC_TSV0_MULTICAST (1 << 4) | |
251 | #define LPC_TSV0_BROADCAST (1 << 5) | |
252 | #define LPC_TSV0_PACKET_DEFER (1 << 6) | |
253 | #define LPC_TSV0_ESCESSIVE_DEFER (1 << 7) | |
254 | #define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8) | |
255 | #define LPC_TSV0_LATE_COLLISION (1 << 9) | |
256 | #define LPC_TSV0_GIANT (1 << 10) | |
257 | #define LPC_TSV0_UNDERRUN (1 << 11) | |
258 | #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF) | |
259 | #define LPC_TSV0_CONTROL_FRAME (1 << 28) | |
260 | #define LPC_TSV0_PAUSE (1 << 29) | |
261 | #define LPC_TSV0_BACKPRESSURE (1 << 30) | |
262 | #define LPC_TSV0_VLAN (1 << 31) | |
263 | ||
264 | /* | |
265 | * tsv1 register definitions | |
266 | */ | |
267 | #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF) | |
268 | #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF) | |
269 | ||
270 | /* | |
271 | * rsv register definitions | |
272 | */ | |
273 | #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF) | |
274 | #define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16) | |
275 | #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17) | |
276 | #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18) | |
277 | #define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19) | |
278 | #define LPC_RSV_CRC_ERROR (1 << 20) | |
279 | #define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21) | |
280 | #define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22) | |
281 | #define LPC_RSV_RECEIVE_OK (1 << 23) | |
282 | #define LPC_RSV_MULTICAST (1 << 24) | |
283 | #define LPC_RSV_BROADCAST (1 << 25) | |
284 | #define LPC_RSV_DRIBBLE_NIBBLE (1 << 26) | |
285 | #define LPC_RSV_CONTROL_FRAME (1 << 27) | |
286 | #define LPC_RSV_PAUSE (1 << 28) | |
287 | #define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29) | |
288 | #define LPC_RSV_VLAN (1 << 30) | |
289 | ||
290 | /* | |
291 | * flowcontrolcounter register definitions | |
292 | */ | |
293 | #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF) | |
294 | #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF) | |
295 | ||
296 | /* | |
297 | * flowcontrolstatus register definitions | |
298 | */ | |
299 | #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF) | |
300 | ||
301 | /* | |
302 | * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared | |
303 | * register definitions | |
304 | */ | |
305 | #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0) | |
306 | #define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1) | |
307 | #define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2) | |
308 | #define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3) | |
309 | #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4) | |
310 | #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5) | |
311 | ||
312 | /* | |
313 | * rxfliterctrl register definitions | |
314 | */ | |
315 | #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12) | |
316 | #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13) | |
317 | ||
318 | /* | |
319 | * rxfilterwolstatus/rxfilterwolclear register definitions | |
320 | */ | |
321 | #define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7) | |
322 | #define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8) | |
323 | ||
324 | /* | |
325 | * intstatus, intenable, intclear, and Intset shared register | |
326 | * definitions | |
327 | */ | |
328 | #define LPC_MACINT_RXOVERRUNINTEN (1 << 0) | |
329 | #define LPC_MACINT_RXERRORONINT (1 << 1) | |
330 | #define LPC_MACINT_RXFINISHEDINTEN (1 << 2) | |
331 | #define LPC_MACINT_RXDONEINTEN (1 << 3) | |
332 | #define LPC_MACINT_TXUNDERRUNINTEN (1 << 4) | |
333 | #define LPC_MACINT_TXERRORINTEN (1 << 5) | |
334 | #define LPC_MACINT_TXFINISHEDINTEN (1 << 6) | |
335 | #define LPC_MACINT_TXDONEINTEN (1 << 7) | |
336 | #define LPC_MACINT_SOFTINTEN (1 << 12) | |
337 | #define LPC_MACINT_WAKEUPINTEN (1 << 13) | |
338 | ||
339 | /* | |
340 | * powerdown register definitions | |
341 | */ | |
342 | #define LPC_POWERDOWN_MACAHB (1 << 31) | |
343 | ||
4de02e4a | 344 | static phy_interface_t lpc_phy_interface_mode(struct device *dev) |
b7370112 | 345 | { |
4de02e4a RS |
346 | if (dev && dev->of_node) { |
347 | const char *mode = of_get_property(dev->of_node, | |
348 | "phy-mode", NULL); | |
349 | if (mode && !strcmp(mode, "mii")) | |
350 | return PHY_INTERFACE_MODE_MII; | |
351 | return PHY_INTERFACE_MODE_RMII; | |
352 | } | |
353 | ||
354 | /* non-DT */ | |
b7370112 | 355 | #ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT |
356 | return PHY_INTERFACE_MODE_MII; | |
357 | #else | |
358 | return PHY_INTERFACE_MODE_RMII; | |
359 | #endif | |
360 | } | |
361 | ||
4de02e4a | 362 | static bool use_iram_for_net(struct device *dev) |
b7370112 | 363 | { |
4de02e4a RS |
364 | if (dev && dev->of_node) |
365 | return of_property_read_bool(dev->of_node, "use-iram"); | |
366 | ||
367 | /* non-DT */ | |
b7370112 | 368 | #ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET |
4de02e4a | 369 | return true; |
b7370112 | 370 | #else |
4de02e4a | 371 | return false; |
b7370112 | 372 | #endif |
373 | } | |
374 | ||
375 | /* Receive Status information word */ | |
376 | #define RXSTATUS_SIZE 0x000007FF | |
377 | #define RXSTATUS_CONTROL (1 << 18) | |
378 | #define RXSTATUS_VLAN (1 << 19) | |
379 | #define RXSTATUS_FILTER (1 << 20) | |
380 | #define RXSTATUS_MULTICAST (1 << 21) | |
381 | #define RXSTATUS_BROADCAST (1 << 22) | |
382 | #define RXSTATUS_CRC (1 << 23) | |
383 | #define RXSTATUS_SYMBOL (1 << 24) | |
384 | #define RXSTATUS_LENGTH (1 << 25) | |
385 | #define RXSTATUS_RANGE (1 << 26) | |
386 | #define RXSTATUS_ALIGN (1 << 27) | |
387 | #define RXSTATUS_OVERRUN (1 << 28) | |
388 | #define RXSTATUS_NODESC (1 << 29) | |
389 | #define RXSTATUS_LAST (1 << 30) | |
390 | #define RXSTATUS_ERROR (1 << 31) | |
391 | ||
392 | #define RXSTATUS_STATUS_ERROR \ | |
393 | (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \ | |
394 | RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC) | |
395 | ||
396 | /* Receive Descriptor control word */ | |
397 | #define RXDESC_CONTROL_SIZE 0x000007FF | |
398 | #define RXDESC_CONTROL_INT (1 << 31) | |
399 | ||
400 | /* Transmit Status information word */ | |
401 | #define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF) | |
402 | #define TXSTATUS_DEFER (1 << 25) | |
403 | #define TXSTATUS_EXCESSDEFER (1 << 26) | |
404 | #define TXSTATUS_EXCESSCOLL (1 << 27) | |
405 | #define TXSTATUS_LATECOLL (1 << 28) | |
406 | #define TXSTATUS_UNDERRUN (1 << 29) | |
407 | #define TXSTATUS_NODESC (1 << 30) | |
408 | #define TXSTATUS_ERROR (1 << 31) | |
409 | ||
410 | /* Transmit Descriptor control word */ | |
411 | #define TXDESC_CONTROL_SIZE 0x000007FF | |
412 | #define TXDESC_CONTROL_OVERRIDE (1 << 26) | |
413 | #define TXDESC_CONTROL_HUGE (1 << 27) | |
414 | #define TXDESC_CONTROL_PAD (1 << 28) | |
415 | #define TXDESC_CONTROL_CRC (1 << 29) | |
416 | #define TXDESC_CONTROL_LAST (1 << 30) | |
417 | #define TXDESC_CONTROL_INT (1 << 31) | |
418 | ||
419 | static int lpc_eth_hard_start_xmit(struct sk_buff *skb, | |
420 | struct net_device *ndev); | |
421 | ||
422 | /* | |
423 | * Structure of a TX/RX descriptors and RX status | |
424 | */ | |
/* DMA descriptor shared with the MAC engine: the bus address of a
 * packet buffer plus its control word. Little-endian as consumed by
 * the hardware (see __lpc_txrx_desc_setup for how these are filled).
 */
struct txrx_desc_t {
	__le32 packet;		/* bus address of the packet buffer */
	__le32 control;		/* TXDESC_CONTROL_*/RXDESC_CONTROL_* flags + size */
};

/* Hardware-written per-frame receive status word pair. */
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags and received size */
	__le32 statushashcrc;	/* hash CRC word written by the MAC */
};
433 | ||
434 | /* | |
435 | * Device driver data structure | |
436 | */ | |
/*
 * Device driver data structure — one instance per controller.
 */
struct netdata_local {
	struct platform_device *pdev;	/* owning platform device */
	struct net_device *ndev;	/* associated network device */
	spinlock_t lock;		/* taken around link/speed/duplex updates
					 * (see lpc_handle_link_change) */
	void __iomem *net_base;		/* mapped MAC register base */
	u32 msg_enable;			/* presumably netif msg-level bitmap —
					 * set outside this chunk; verify */
	struct sk_buff *skb[ENET_TX_DESC]; /* in-flight TX skbs, by desc index */
	unsigned int last_tx_idx;	/* next TX descriptor to reclaim */
	unsigned int num_used_tx_buffs;	/* TX descriptors currently in use */
	struct mii_bus *mii_bus;	/* MDIO bus serving the PHY */
	struct phy_device *phy_dev;	/* attached PHY */
	struct clk *clk;		/* MAC peripheral clock */
	dma_addr_t dma_buff_base_p;	/* bus address of the DMA region */
	void *dma_buff_base_v;		/* CPU address of the DMA region */
	size_t dma_buff_size;		/* size of the DMA region */
	struct txrx_desc_t *tx_desc_v;	/* TX descriptor array (in DMA region) */
	u32 *tx_stat_v;			/* TX status word array */
	void *tx_buff_v;		/* TX packet buffers */
	struct txrx_desc_t *rx_desc_v;	/* RX descriptor array */
	struct rx_status_t *rx_stat_v;	/* RX status array */
	void *rx_buff_v;		/* RX packet buffers */
	int link;			/* last seen PHY link state */
	int speed;			/* current link speed (0 when down) */
	int duplex;			/* current duplex (-1 when down) */
	struct napi_struct napi;	/* NAPI polling context */
};
463 | ||
464 | /* | |
465 | * MAC support functions | |
466 | */ | |
467 | static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) | |
468 | { | |
469 | u32 tmp; | |
470 | ||
471 | /* Set station address */ | |
472 | tmp = mac[0] | ((u32)mac[1] << 8); | |
473 | writel(tmp, LPC_ENET_SA2(pldat->net_base)); | |
474 | tmp = mac[2] | ((u32)mac[3] << 8); | |
475 | writel(tmp, LPC_ENET_SA1(pldat->net_base)); | |
476 | tmp = mac[4] | ((u32)mac[5] << 8); | |
477 | writel(tmp, LPC_ENET_SA0(pldat->net_base)); | |
478 | ||
479 | netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac); | |
480 | } | |
481 | ||
482 | static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) | |
483 | { | |
484 | u32 tmp; | |
485 | ||
486 | /* Get station address */ | |
487 | tmp = readl(LPC_ENET_SA2(pldat->net_base)); | |
488 | mac[0] = tmp & 0xFF; | |
489 | mac[1] = tmp >> 8; | |
490 | tmp = readl(LPC_ENET_SA1(pldat->net_base)); | |
491 | mac[2] = tmp & 0xFF; | |
492 | mac[3] = tmp >> 8; | |
493 | tmp = readl(LPC_ENET_SA0(pldat->net_base)); | |
494 | mac[4] = tmp & 0xFF; | |
495 | mac[5] = tmp >> 8; | |
496 | } | |
497 | ||
/* Gate the Ethernet peripheral clock on or off. */
static void __lpc_eth_clock_enable(struct netdata_local *pldat,
				   bool enable)
{
	if (enable)
		clk_enable(pldat->clk);
	else
		clk_disable(pldat->clk);
}
506 | ||
/* Program duplex- and speed-dependent MAC parameters from the values
 * cached in pldat (updated by the PHY link-change handler).
 */
static void __lpc_params_setup(struct netdata_local *pldat)
{
	u32 tmp;

	if (pldat->duplex == DUPLEX_FULL) {
		/* Set full-duplex in MAC2 and COMMAND (read-modify-write) */
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp |= LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp |= LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		/* Full-duplex back-to-back inter-packet gap */
		writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
	} else {
		/* Clear full-duplex bits for half-duplex operation */
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp &= ~LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp &= ~LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		/* Half-duplex inter-packet gap */
		writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
	}

	/* SUPP speed bit selects 100 Mbit; cleared for 10 Mbit */
	if (pldat->speed == SPEED_100)
		writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
	else
		writel(0, LPC_ENET_SUPP(pldat->net_base));
}
534 | ||
/* Assert all MAC-internal reset bits, then the datapath resets in the
 * COMMAND register, returning the controller to a known state.
 */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
544 | ||
/* Reset the MII management block and select the slowest MDC divider.
 * Always returns 0 (mii_bus reset callbacks return int).
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider; this second
	 * write also deasserts the reset bit set above.
	 */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
556 | ||
557 | static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat) | |
558 | { | |
559 | phys_addr_t phaddr; | |
560 | ||
561 | phaddr = addr - pldat->dma_buff_base_v; | |
562 | phaddr += pldat->dma_buff_base_p; | |
563 | ||
564 | return phaddr; | |
565 | } | |
566 | ||
/* Enable the RX-done and TX-done MAC interrupts (all others masked). */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
572 | ||
/* Mask all MAC interrupts. */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
577 | ||
/* Setup TX/RX descriptors: carve the single DMA region at
 * pldat->dma_buff_base_v into, in order, the TX descriptor array, TX
 * status array, TX buffers, RX descriptor array, RX status array and
 * RX buffers (each run 16-byte aligned), then point the hardware base
 * registers at them. The carving order must match the sizing done by
 * the caller that allocated dma_buff_base_v.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		/* Each descriptor owns one fixed ENET_MAXF_SIZE buffer */
		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		/* Interrupt on completion; size field is (bytes - 1) */
		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
651 | ||
652 | static void __lpc_eth_init(struct netdata_local *pldat) | |
653 | { | |
654 | u32 tmp; | |
655 | ||
656 | /* Disable controller and reset */ | |
657 | tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); | |
658 | tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; | |
659 | writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); | |
660 | tmp = readl(LPC_ENET_MAC1(pldat->net_base)); | |
661 | tmp &= ~LPC_MAC1_RECV_ENABLE; | |
662 | writel(tmp, LPC_ENET_MAC1(pldat->net_base)); | |
663 | ||
664 | /* Initial MAC setup */ | |
665 | writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base)); | |
666 | writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE), | |
667 | LPC_ENET_MAC2(pldat->net_base)); | |
668 | writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base)); | |
669 | ||
670 | /* Collision window, gap */ | |
671 | writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) | | |
672 | LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)), | |
673 | LPC_ENET_CLRT(pldat->net_base)); | |
674 | writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base)); | |
675 | ||
4de02e4a | 676 | if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) |
b7370112 | 677 | writel(LPC_COMMAND_PASSRUNTFRAME, |
678 | LPC_ENET_COMMAND(pldat->net_base)); | |
679 | else { | |
680 | writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), | |
681 | LPC_ENET_COMMAND(pldat->net_base)); | |
682 | writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); | |
683 | } | |
684 | ||
685 | __lpc_params_setup(pldat); | |
686 | ||
687 | /* Setup TX and RX descriptors */ | |
688 | __lpc_txrx_desc_setup(pldat); | |
689 | ||
690 | /* Setup packet filtering */ | |
691 | writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT), | |
692 | LPC_ENET_RXFILTER_CTRL(pldat->net_base)); | |
693 | ||
694 | /* Get the next TX buffer output index */ | |
695 | pldat->num_used_tx_buffs = 0; | |
696 | pldat->last_tx_idx = | |
697 | readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); | |
698 | ||
699 | /* Clear and enable interrupts */ | |
700 | writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base)); | |
701 | smp_wmb(); | |
702 | lpc_eth_enable_int(pldat->net_base); | |
703 | ||
704 | /* Enable controller */ | |
705 | tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); | |
706 | tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; | |
707 | writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); | |
708 | tmp = readl(LPC_ENET_MAC1(pldat->net_base)); | |
709 | tmp |= LPC_MAC1_RECV_ENABLE; | |
710 | writel(tmp, LPC_ENET_MAC1(pldat->net_base)); | |
711 | } | |
712 | ||
/* Quiesce the controller: reset all MAC logic and clear the MAC1/MAC2
 * configuration registers.
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
720 | ||
721 | /* | |
722 | * MAC<--->PHY support functions | |
723 | */ | |
/* mii_bus read callback: read one 16-bit PHY register over the MII
 * management interface. Returns the register value, or -EIO if the
 * controller stays busy past a 100 ms deadline.
 */
static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	int lps;

	/* Select PHY + register, then kick off a single read cycle */
	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));

	/* Wait for unbusy status */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	lps = readl(LPC_ENET_MRDD(pldat->net_base));
	/* Clear the read command again */
	writel(0, LPC_ENET_MCMD(pldat->net_base));

	return lps;
}
745 | ||
/* mii_bus write callback: write one 16-bit PHY register over the MII
 * management interface. Returns 0 on success, -EIO if the controller
 * stays busy past a 100 ms deadline.
 */
static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
			  u16 phydata)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	/* Select PHY + register; writing MWTD starts the cycle */
	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(phydata, LPC_ENET_MWTD(pldat->net_base));

	/* Wait for completion */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
764 | ||
765 | static int lpc_mdio_reset(struct mii_bus *bus) | |
766 | { | |
767 | return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv); | |
768 | } | |
769 | ||
/* phylib adjust_link callback: mirror the PHY's link/speed/duplex into
 * pldat (under pldat->lock) and reprogram the MAC parameters when any
 * of them changed. Note __lpc_params_setup() is called after the lock
 * is dropped.
 */
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			/* Link went down: reset cached parameters */
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	if (status_change)
		__lpc_params_setup(pldat);
}
804 | ||
/* Find the first PHY on our MDIO bus and attach to it with the
 * link-change handler. Returns 0 on success or a negative errno.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, dev_name(&phydev->dev),
			     &lpc_handle_link_change, 0,
			     lpc_phy_interface_mode(&pldat->pdev->dev));

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features (10/100 only) */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Start with link down; lpc_handle_link_change fills these in */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;
	pldat->phy_dev = phydev;

	netdev_info(ndev,
		"attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
	return 0;
}
844 | ||
845 | static int lpc_mii_init(struct netdata_local *pldat) | |
846 | { | |
847 | int err = -ENXIO, i; | |
848 | ||
849 | pldat->mii_bus = mdiobus_alloc(); | |
850 | if (!pldat->mii_bus) { | |
851 | err = -ENOMEM; | |
852 | goto err_out; | |
853 | } | |
854 | ||
855 | /* Setup MII mode */ | |
4de02e4a | 856 | if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) |
b7370112 | 857 | writel(LPC_COMMAND_PASSRUNTFRAME, |
858 | LPC_ENET_COMMAND(pldat->net_base)); | |
859 | else { | |
860 | writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), | |
861 | LPC_ENET_COMMAND(pldat->net_base)); | |
862 | writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); | |
863 | } | |
864 | ||
865 | pldat->mii_bus->name = "lpc_mii_bus"; | |
866 | pldat->mii_bus->read = &lpc_mdio_read; | |
867 | pldat->mii_bus->write = &lpc_mdio_write; | |
868 | pldat->mii_bus->reset = &lpc_mdio_reset; | |
869 | snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", | |
870 | pldat->pdev->name, pldat->pdev->id); | |
871 | pldat->mii_bus->priv = pldat; | |
872 | pldat->mii_bus->parent = &pldat->pdev->dev; | |
873 | ||
874 | pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | |
875 | if (!pldat->mii_bus->irq) { | |
876 | err = -ENOMEM; | |
877 | goto err_out_1; | |
878 | } | |
879 | ||
880 | for (i = 0; i < PHY_MAX_ADDR; i++) | |
881 | pldat->mii_bus->irq[i] = PHY_POLL; | |
882 | ||
883 | platform_set_drvdata(pldat->pdev, pldat->mii_bus); | |
884 | ||
885 | if (mdiobus_register(pldat->mii_bus)) | |
886 | goto err_out_free_mdio_irq; | |
887 | ||
888 | if (lpc_mii_probe(pldat->ndev) != 0) | |
889 | goto err_out_unregister_bus; | |
890 | ||
891 | return 0; | |
892 | ||
893 | err_out_unregister_bus: | |
894 | mdiobus_unregister(pldat->mii_bus); | |
895 | err_out_free_mdio_irq: | |
896 | kfree(pldat->mii_bus->irq); | |
897 | err_out_1: | |
898 | mdiobus_free(pldat->mii_bus); | |
899 | err_out: | |
900 | return err; | |
901 | } | |
902 | ||
/*
 * Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the driver's last-seen index up to the
 * controller's consume index, accumulating error/collision/byte stats
 * and freeing the associated skbs.  Called from lpc_eth_poll() with the
 * TX queue lock held, so dev_kfree_skb_irq() is used.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		skb = pldat->skb[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}
		dev_kfree_skb_irq(skb);

		/* Re-read the consume index: the controller may have
		   completed more frames while we were processing */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	/* Restart the queue once at least half of the ring is free */
	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
960 | ||
/*
 * Receive up to @budget frames from the RX descriptor ring.
 *
 * Frames are copied out of the fixed DMA buffer area into freshly
 * allocated skbs and handed to the stack via netif_receive_skb().
 * The consume index is written back to the controller after each
 * descriptor so the hardware can reuse it immediately.
 *
 * Returns the number of descriptors processed (errors and drops
 * count against the budget too).
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		/* A pure out-of-range-length report is not treated as a
		   hard error; mask the error bit in that case */
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				/* No memory for an skb: count the drop but
				   still consume the descriptor below */
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
1032 | ||
/*
 * NAPI poll handler: reclaim finished TX buffers (under the TX queue
 * lock, since __lpc_handle_xmit() races with the xmit path) and then
 * receive up to @budget frames.  Device interrupts are re-enabled only
 * when all pending RX work fit within the budget.
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
1053 | ||
/*
 * Interrupt handler: acknowledge all pending interrupt sources, mask
 * further device interrupts and hand the actual work off to NAPI.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Interrupts stay disabled until lpc_eth_poll() finishes */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1074 | ||
/*
 * ndo_stop: quiesce NAPI and the TX queue, stop the PHY, reset the MAC
 * and gate the controller clock.  Always returns 0.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (pldat->phy_dev)
		phy_stop(pldat->phy_dev);

	/* Reset the MAC and clear the MAC config registers under the
	   driver lock so no concurrent register access can interleave */
	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	__lpc_eth_clock_enable(pldat, false);

	return 0;
}
1100 | ||
/*
 * ndo_start_xmit: copy the frame into the next slot of the fixed DMA
 * buffer area, fill in the matching TX descriptor and advance the
 * produce index to start transmission.
 *
 * The skb is NOT freed here; it is kept in pldat->skb[] and released
 * by __lpc_handle_xmit() once the controller reports completion.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	/* Single-fragment frame: mark last and request an interrupt */
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skb[txidx] = skb;
	pldat->num_used_tx_buffs++;

	/* Start transmit */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	return NETDEV_TX_OK;
}
1152 | ||
1153 | static int lpc_set_mac_address(struct net_device *ndev, void *p) | |
1154 | { | |
1155 | struct sockaddr *addr = p; | |
1156 | struct netdata_local *pldat = netdev_priv(ndev); | |
1157 | unsigned long flags; | |
1158 | ||
1159 | if (!is_valid_ether_addr(addr->sa_data)) | |
1160 | return -EADDRNOTAVAIL; | |
1161 | memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); | |
1162 | ||
1163 | spin_lock_irqsave(&pldat->lock, flags); | |
1164 | ||
1165 | /* Set station address */ | |
1166 | __lpc_set_mac(pldat, ndev->dev_addr); | |
1167 | ||
1168 | spin_unlock_irqrestore(&pldat->lock, flags); | |
1169 | ||
1170 | return 0; | |
1171 | } | |
1172 | ||
/*
 * ndo_set_rx_mode: program the RX filter control register according to
 * the interface flags (promiscuous / all-multicast) and rebuild the
 * 64-bit multicast hash filter from the current multicast list.
 */
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	/* Always accept broadcast and perfect-match unicast */
	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));


	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		/* Upper 6 bits of the CRC select one of 64 hash bits */
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}
1219 | ||
1220 | static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) | |
1221 | { | |
1222 | struct netdata_local *pldat = netdev_priv(ndev); | |
1223 | struct phy_device *phydev = pldat->phy_dev; | |
1224 | ||
1225 | if (!netif_running(ndev)) | |
1226 | return -EINVAL; | |
1227 | ||
1228 | if (!phydev) | |
1229 | return -ENODEV; | |
1230 | ||
1231 | return phy_mii_ioctl(phydev, req, cmd); | |
1232 | } | |
1233 | ||
/*
 * ndo_open: ungate the controller clock, reset and reinitialize the
 * MAC, then start the PHY, the TX queue and NAPI (in that order).
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	__lpc_eth_clock_enable(pldat, true);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(pldat->phy_dev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1257 | ||
1258 | /* | |
1259 | * Ethtool ops | |
1260 | */ | |
1261 | static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev, | |
1262 | struct ethtool_drvinfo *info) | |
1263 | { | |
1264 | strcpy(info->driver, MODNAME); | |
1265 | strcpy(info->version, DRV_VERSION); | |
1266 | strcpy(info->bus_info, dev_name(ndev->dev.parent)); | |
1267 | } | |
1268 | ||
1269 | static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev) | |
1270 | { | |
1271 | struct netdata_local *pldat = netdev_priv(ndev); | |
1272 | ||
1273 | return pldat->msg_enable; | |
1274 | } | |
1275 | ||
1276 | static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level) | |
1277 | { | |
1278 | struct netdata_local *pldat = netdev_priv(ndev); | |
1279 | ||
1280 | pldat->msg_enable = level; | |
1281 | } | |
1282 | ||
1283 | static int lpc_eth_ethtool_getsettings(struct net_device *ndev, | |
1284 | struct ethtool_cmd *cmd) | |
1285 | { | |
1286 | struct netdata_local *pldat = netdev_priv(ndev); | |
1287 | struct phy_device *phydev = pldat->phy_dev; | |
1288 | ||
1289 | if (!phydev) | |
1290 | return -EOPNOTSUPP; | |
1291 | ||
1292 | return phy_ethtool_gset(phydev, cmd); | |
1293 | } | |
1294 | ||
1295 | static int lpc_eth_ethtool_setsettings(struct net_device *ndev, | |
1296 | struct ethtool_cmd *cmd) | |
1297 | { | |
1298 | struct netdata_local *pldat = netdev_priv(ndev); | |
1299 | struct phy_device *phydev = pldat->phy_dev; | |
1300 | ||
1301 | if (!phydev) | |
1302 | return -EOPNOTSUPP; | |
1303 | ||
1304 | return phy_ethtool_sset(phydev, cmd); | |
1305 | } | |
1306 | ||
/* ethtool operations exposed by this driver */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
	.get_settings	= lpc_eth_ethtool_getsettings,
	.set_settings	= lpc_eth_ethtool_setsettings,
	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
	.get_link	= ethtool_op_get_link,
};
1315 | ||
/* net_device operations for the LPC ethernet interface */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_do_ioctl		= lpc_eth_ioctl,
	.ndo_set_mac_address	= lpc_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
};
1325 | ||
1326 | static int lpc_eth_drv_probe(struct platform_device *pdev) | |
1327 | { | |
1328 | struct resource *res; | |
b7370112 | 1329 | struct net_device *ndev; |
1330 | struct netdata_local *pldat; | |
1331 | struct phy_device *phydev; | |
1332 | dma_addr_t dma_handle; | |
1333 | int irq, ret; | |
4de02e4a RS |
1334 | u32 tmp; |
1335 | ||
1336 | /* Setup network interface for RMII or MII mode */ | |
1337 | tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL); | |
1338 | tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK; | |
1339 | if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII) | |
1340 | tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS; | |
1341 | else | |
1342 | tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS; | |
1343 | __raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL); | |
b7370112 | 1344 | |
1345 | /* Get platform resources */ | |
1346 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
b7370112 | 1347 | irq = platform_get_irq(pdev, 0); |
4de02e4a | 1348 | if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { |
b7370112 | 1349 | dev_err(&pdev->dev, "error getting resources.\n"); |
1350 | ret = -ENXIO; | |
1351 | goto err_exit; | |
1352 | } | |
1353 | ||
1354 | /* Allocate net driver data structure */ | |
1355 | ndev = alloc_etherdev(sizeof(struct netdata_local)); | |
1356 | if (!ndev) { | |
1357 | dev_err(&pdev->dev, "could not allocate device.\n"); | |
1358 | ret = -ENOMEM; | |
1359 | goto err_exit; | |
1360 | } | |
1361 | ||
1362 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
1363 | ||
1364 | pldat = netdev_priv(ndev); | |
1365 | pldat->pdev = pdev; | |
1366 | pldat->ndev = ndev; | |
1367 | ||
1368 | spin_lock_init(&pldat->lock); | |
1369 | ||
1370 | /* Save resources */ | |
1371 | ndev->irq = irq; | |
1372 | ||
1373 | /* Get clock for the device */ | |
1374 | pldat->clk = clk_get(&pdev->dev, NULL); | |
1375 | if (IS_ERR(pldat->clk)) { | |
1376 | dev_err(&pdev->dev, "error getting clock.\n"); | |
1377 | ret = PTR_ERR(pldat->clk); | |
1378 | goto err_out_free_dev; | |
1379 | } | |
1380 | ||
1381 | /* Enable network clock */ | |
1382 | __lpc_eth_clock_enable(pldat, true); | |
1383 | ||
1384 | /* Map IO space */ | |
1385 | pldat->net_base = ioremap(res->start, res->end - res->start + 1); | |
1386 | if (!pldat->net_base) { | |
1387 | dev_err(&pdev->dev, "failed to map registers\n"); | |
1388 | ret = -ENOMEM; | |
1389 | goto err_out_disable_clocks; | |
1390 | } | |
1391 | ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0, | |
1392 | ndev->name, ndev); | |
1393 | if (ret) { | |
1394 | dev_err(&pdev->dev, "error requesting interrupt.\n"); | |
1395 | goto err_out_iounmap; | |
1396 | } | |
1397 | ||
1398 | /* Fill in the fields of the device structure with ethernet values. */ | |
1399 | ether_setup(ndev); | |
1400 | ||
1401 | /* Setup driver functions */ | |
1402 | ndev->netdev_ops = &lpc_netdev_ops; | |
1403 | ndev->ethtool_ops = &lpc_eth_ethtool_ops; | |
1404 | ndev->watchdog_timeo = msecs_to_jiffies(2500); | |
1405 | ||
1406 | /* Get size of DMA buffers/descriptors region */ | |
1407 | pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE + | |
1408 | sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t)); | |
1409 | pldat->dma_buff_base_v = 0; | |
1410 | ||
4de02e4a RS |
1411 | if (use_iram_for_net(&pldat->pdev->dev)) { |
1412 | dma_handle = LPC32XX_IRAM_BASE; | |
b7370112 | 1413 | if (pldat->dma_buff_size <= lpc32xx_return_iram_size()) |
1414 | pldat->dma_buff_base_v = | |
4de02e4a | 1415 | io_p2v(LPC32XX_IRAM_BASE); |
b7370112 | 1416 | else |
1417 | netdev_err(ndev, | |
1418 | "IRAM not big enough for net buffers, using SDRAM instead.\n"); | |
1419 | } | |
1420 | ||
1421 | if (pldat->dma_buff_base_v == 0) { | |
4de02e4a RS |
1422 | pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF; |
1423 | pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask; | |
b7370112 | 1424 | pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); |
1425 | ||
1426 | /* Allocate a chunk of memory for the DMA ethernet buffers | |
1427 | and descriptors */ | |
1428 | pldat->dma_buff_base_v = | |
1429 | dma_alloc_coherent(&pldat->pdev->dev, | |
1430 | pldat->dma_buff_size, &dma_handle, | |
1431 | GFP_KERNEL); | |
1432 | ||
1433 | if (pldat->dma_buff_base_v == NULL) { | |
1434 | dev_err(&pdev->dev, "error getting DMA region.\n"); | |
1435 | ret = -ENOMEM; | |
1436 | goto err_out_free_irq; | |
1437 | } | |
1438 | } | |
1439 | pldat->dma_buff_base_p = dma_handle; | |
1440 | ||
1441 | netdev_dbg(ndev, "IO address start :0x%08x\n", | |
1442 | res->start); | |
1443 | netdev_dbg(ndev, "IO address size :%d\n", | |
1444 | res->end - res->start + 1); | |
1445 | netdev_err(ndev, "IO address (mapped) :0x%p\n", | |
1446 | pldat->net_base); | |
1447 | netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); | |
1448 | netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); | |
1449 | netdev_dbg(ndev, "DMA buffer P address :0x%08x\n", | |
1450 | pldat->dma_buff_base_p); | |
1451 | netdev_dbg(ndev, "DMA buffer V address :0x%p\n", | |
1452 | pldat->dma_buff_base_v); | |
1453 | ||
1454 | /* Get MAC address from current HW setting (POR state is all zeros) */ | |
1455 | __lpc_get_mac(pldat, ndev->dev_addr); | |
1456 | ||
1457 | #ifdef CONFIG_OF_NET | |
1458 | if (!is_valid_ether_addr(ndev->dev_addr)) { | |
1459 | const char *macaddr = of_get_mac_address(pdev->dev.of_node); | |
1460 | if (macaddr) | |
1461 | memcpy(ndev->dev_addr, macaddr, ETH_ALEN); | |
1462 | } | |
1463 | #endif | |
1464 | if (!is_valid_ether_addr(ndev->dev_addr)) | |
cdaf0b83 | 1465 | eth_hw_addr_random(ndev); |
b7370112 | 1466 | |
1467 | /* Reset the ethernet controller */ | |
1468 | __lpc_eth_reset(pldat); | |
1469 | ||
1470 | /* then shut everything down to save power */ | |
1471 | __lpc_eth_shutdown(pldat); | |
1472 | ||
1473 | /* Set default parameters */ | |
1474 | pldat->msg_enable = NETIF_MSG_LINK; | |
1475 | ||
1476 | /* Force an MII interface reset and clock setup */ | |
1477 | __lpc_mii_mngt_reset(pldat); | |
1478 | ||
1479 | /* Force default PHY interface setup in chip, this will probably be | |
1480 | changed by the PHY driver */ | |
1481 | pldat->link = 0; | |
1482 | pldat->speed = 100; | |
1483 | pldat->duplex = DUPLEX_FULL; | |
1484 | __lpc_params_setup(pldat); | |
1485 | ||
1486 | netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT); | |
1487 | ||
1488 | ret = register_netdev(ndev); | |
1489 | if (ret) { | |
1490 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | |
1491 | goto err_out_dma_unmap; | |
1492 | } | |
1493 | platform_set_drvdata(pdev, ndev); | |
1494 | ||
1495 | if (lpc_mii_init(pldat) != 0) | |
1496 | goto err_out_unregister_netdev; | |
1497 | ||
1498 | netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", | |
1499 | res->start, ndev->irq); | |
1500 | ||
1501 | phydev = pldat->phy_dev; | |
1502 | ||
1503 | device_init_wakeup(&pdev->dev, 1); | |
1504 | device_set_wakeup_enable(&pdev->dev, 0); | |
1505 | ||
1506 | return 0; | |
1507 | ||
1508 | err_out_unregister_netdev: | |
1509 | platform_set_drvdata(pdev, NULL); | |
1510 | unregister_netdev(ndev); | |
1511 | err_out_dma_unmap: | |
4de02e4a | 1512 | if (!use_iram_for_net(&pldat->pdev->dev) || |
b7370112 | 1513 | pldat->dma_buff_size > lpc32xx_return_iram_size()) |
1514 | dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, | |
1515 | pldat->dma_buff_base_v, | |
1516 | pldat->dma_buff_base_p); | |
1517 | err_out_free_irq: | |
1518 | free_irq(ndev->irq, ndev); | |
1519 | err_out_iounmap: | |
1520 | iounmap(pldat->net_base); | |
1521 | err_out_disable_clocks: | |
1522 | clk_disable(pldat->clk); | |
1523 | clk_put(pldat->clk); | |
1524 | err_out_free_dev: | |
1525 | free_netdev(ndev); | |
1526 | err_exit: | |
1527 | pr_err("%s: not found (%d).\n", MODNAME, ret); | |
1528 | return ret; | |
1529 | } | |
1530 | ||
1531 | static int lpc_eth_drv_remove(struct platform_device *pdev) | |
1532 | { | |
1533 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1534 | struct netdata_local *pldat = netdev_priv(ndev); | |
1535 | ||
1536 | unregister_netdev(ndev); | |
1537 | platform_set_drvdata(pdev, NULL); | |
1538 | ||
4de02e4a | 1539 | if (!use_iram_for_net(&pldat->pdev->dev) || |
b7370112 | 1540 | pldat->dma_buff_size > lpc32xx_return_iram_size()) |
1541 | dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, | |
1542 | pldat->dma_buff_base_v, | |
1543 | pldat->dma_buff_base_p); | |
1544 | free_irq(ndev->irq, ndev); | |
1545 | iounmap(pldat->net_base); | |
1546 | mdiobus_free(pldat->mii_bus); | |
1547 | clk_disable(pldat->clk); | |
1548 | clk_put(pldat->clk); | |
1549 | free_netdev(ndev); | |
1550 | ||
1551 | return 0; | |
1552 | } | |
1553 | ||
1554 | #ifdef CONFIG_PM | |
1555 | static int lpc_eth_drv_suspend(struct platform_device *pdev, | |
1556 | pm_message_t state) | |
1557 | { | |
1558 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1559 | struct netdata_local *pldat = netdev_priv(ndev); | |
1560 | ||
1561 | if (device_may_wakeup(&pdev->dev)) | |
1562 | enable_irq_wake(ndev->irq); | |
1563 | ||
1564 | if (ndev) { | |
1565 | if (netif_running(ndev)) { | |
1566 | netif_device_detach(ndev); | |
1567 | __lpc_eth_shutdown(pldat); | |
1568 | clk_disable(pldat->clk); | |
1569 | ||
1570 | /* | |
1571 | * Reset again now clock is disable to be sure | |
1572 | * EMC_MDC is down | |
1573 | */ | |
1574 | __lpc_eth_reset(pldat); | |
1575 | } | |
1576 | } | |
1577 | ||
1578 | return 0; | |
1579 | } | |
1580 | ||
1581 | static int lpc_eth_drv_resume(struct platform_device *pdev) | |
1582 | { | |
1583 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1584 | struct netdata_local *pldat; | |
1585 | ||
1586 | if (device_may_wakeup(&pdev->dev)) | |
1587 | disable_irq_wake(ndev->irq); | |
1588 | ||
1589 | if (ndev) { | |
1590 | if (netif_running(ndev)) { | |
1591 | pldat = netdev_priv(ndev); | |
1592 | ||
1593 | /* Enable interface clock */ | |
1594 | clk_enable(pldat->clk); | |
1595 | ||
1596 | /* Reset and initialize */ | |
1597 | __lpc_eth_reset(pldat); | |
1598 | __lpc_eth_init(pldat); | |
1599 | ||
1600 | netif_device_attach(ndev); | |
1601 | } | |
1602 | } | |
1603 | ||
1604 | return 0; | |
1605 | } | |
1606 | #endif | |
1607 | ||
4de02e4a RS |
#ifdef CONFIG_OF
/* Devicetree match table: binds this driver to "nxp,lpc-eth" nodes */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
#endif
1615 | ||
b7370112 | 1616 | static struct platform_driver lpc_eth_driver = { |
1617 | .probe = lpc_eth_drv_probe, | |
1618 | .remove = __devexit_p(lpc_eth_drv_remove), | |
1619 | #ifdef CONFIG_PM | |
1620 | .suspend = lpc_eth_drv_suspend, | |
1621 | .resume = lpc_eth_drv_resume, | |
1622 | #endif | |
1623 | .driver = { | |
1624 | .name = MODNAME, | |
4de02e4a | 1625 | .of_match_table = of_match_ptr(lpc_eth_match), |
b7370112 | 1626 | }, |
1627 | }; | |
1628 | ||
/* Generates module init/exit that register/unregister the driver */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");