Commit | Line | Data |
---|---|---|
b7370112 | 1 | /* |
2 | * drivers/net/ethernet/nxp/lpc_eth.c | |
3 | * | |
4 | * Author: Kevin Wells <kevin.wells@nxp.com> | |
5 | * | |
6 | * Copyright (C) 2010 NXP Semiconductors | |
7 | * Copyright (C) 2012 Roland Stigge <stigge@antcom.de> | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License as published by | |
11 | * the Free Software Foundation; either version 2 of the License, or | |
12 | * (at your option) any later version. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | */ | |
19 | ||
20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
21 | ||
22 | #include <linux/init.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/kernel.h> | |
25 | #include <linux/sched.h> | |
26 | #include <linux/slab.h> | |
27 | #include <linux/delay.h> | |
28 | #include <linux/interrupt.h> | |
29 | #include <linux/errno.h> | |
30 | #include <linux/ioport.h> | |
31 | #include <linux/crc32.h> | |
32 | #include <linux/platform_device.h> | |
33 | #include <linux/spinlock.h> | |
34 | #include <linux/ethtool.h> | |
35 | #include <linux/mii.h> | |
36 | #include <linux/clk.h> | |
37 | #include <linux/workqueue.h> | |
38 | #include <linux/netdevice.h> | |
39 | #include <linux/etherdevice.h> | |
40 | #include <linux/skbuff.h> | |
41 | #include <linux/phy.h> | |
42 | #include <linux/dma-mapping.h> | |
4de02e4a | 43 | #include <linux/of.h> |
b7370112 | 44 | #include <linux/of_net.h> |
45 | #include <linux/types.h> | |
46 | ||
b7370112 | 47 | #include <linux/io.h> |
48 | #include <mach/board.h> | |
49 | #include <mach/platform.h> | |
50 | #include <mach/hardware.h> | |
51 | ||
52 | #define MODNAME "lpc-eth" | |
53 | #define DRV_VERSION "1.00" | |
b7370112 | 54 | |
55 | #define ENET_MAXF_SIZE 1536 | |
56 | #define ENET_RX_DESC 48 | |
57 | #define ENET_TX_DESC 16 | |
58 | ||
59 | #define NAPI_WEIGHT 16 | |
60 | ||
61 | /* | |
62 | * Ethernet MAC controller Register offsets | |
63 | */ | |
64 | #define LPC_ENET_MAC1(x) (x + 0x000) | |
65 | #define LPC_ENET_MAC2(x) (x + 0x004) | |
66 | #define LPC_ENET_IPGT(x) (x + 0x008) | |
67 | #define LPC_ENET_IPGR(x) (x + 0x00C) | |
68 | #define LPC_ENET_CLRT(x) (x + 0x010) | |
69 | #define LPC_ENET_MAXF(x) (x + 0x014) | |
70 | #define LPC_ENET_SUPP(x) (x + 0x018) | |
71 | #define LPC_ENET_TEST(x) (x + 0x01C) | |
72 | #define LPC_ENET_MCFG(x) (x + 0x020) | |
73 | #define LPC_ENET_MCMD(x) (x + 0x024) | |
74 | #define LPC_ENET_MADR(x) (x + 0x028) | |
75 | #define LPC_ENET_MWTD(x) (x + 0x02C) | |
76 | #define LPC_ENET_MRDD(x) (x + 0x030) | |
77 | #define LPC_ENET_MIND(x) (x + 0x034) | |
78 | #define LPC_ENET_SA0(x) (x + 0x040) | |
79 | #define LPC_ENET_SA1(x) (x + 0x044) | |
80 | #define LPC_ENET_SA2(x) (x + 0x048) | |
81 | #define LPC_ENET_COMMAND(x) (x + 0x100) | |
82 | #define LPC_ENET_STATUS(x) (x + 0x104) | |
83 | #define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108) | |
84 | #define LPC_ENET_RXSTATUS(x) (x + 0x10C) | |
85 | #define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110) | |
86 | #define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114) | |
87 | #define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118) | |
88 | #define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C) | |
89 | #define LPC_ENET_TXSTATUS(x) (x + 0x120) | |
90 | #define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124) | |
91 | #define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128) | |
92 | #define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C) | |
93 | #define LPC_ENET_TSV0(x) (x + 0x158) | |
94 | #define LPC_ENET_TSV1(x) (x + 0x15C) | |
95 | #define LPC_ENET_RSV(x) (x + 0x160) | |
96 | #define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170) | |
97 | #define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174) | |
98 | #define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200) | |
99 | #define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204) | |
100 | #define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208) | |
101 | #define LPC_ENET_HASHFILTERL(x) (x + 0x210) | |
102 | #define LPC_ENET_HASHFILTERH(x) (x + 0x214) | |
103 | #define LPC_ENET_INTSTATUS(x) (x + 0xFE0) | |
104 | #define LPC_ENET_INTENABLE(x) (x + 0xFE4) | |
105 | #define LPC_ENET_INTCLEAR(x) (x + 0xFE8) | |
106 | #define LPC_ENET_INTSET(x) (x + 0xFEC) | |
107 | #define LPC_ENET_POWERDOWN(x) (x + 0xFF4) | |
108 | ||
109 | /* | |
110 | * mac1 register definitions | |
111 | */ | |
112 | #define LPC_MAC1_RECV_ENABLE (1 << 0) | |
113 | #define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1) | |
114 | #define LPC_MAC1_RX_FLOW_CONTROL (1 << 2) | |
115 | #define LPC_MAC1_TX_FLOW_CONTROL (1 << 3) | |
116 | #define LPC_MAC1_LOOPBACK (1 << 4) | |
117 | #define LPC_MAC1_RESET_TX (1 << 8) | |
118 | #define LPC_MAC1_RESET_MCS_TX (1 << 9) | |
119 | #define LPC_MAC1_RESET_RX (1 << 10) | |
120 | #define LPC_MAC1_RESET_MCS_RX (1 << 11) | |
121 | #define LPC_MAC1_SIMULATION_RESET (1 << 14) | |
122 | #define LPC_MAC1_SOFT_RESET (1 << 15) | |
123 | ||
124 | /* | |
125 | * mac2 register definitions | |
126 | */ | |
127 | #define LPC_MAC2_FULL_DUPLEX (1 << 0) | |
128 | #define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1) | |
129 | #define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2) | |
130 | #define LPC_MAC2_DELAYED_CRC (1 << 3) | |
131 | #define LPC_MAC2_CRC_ENABLE (1 << 4) | |
132 | #define LPC_MAC2_PAD_CRC_ENABLE (1 << 5) | |
133 | #define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6) | |
134 | #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7) | |
135 | #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8) | |
136 | #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9) | |
137 | #define LPC_MAC2_NO_BACKOFF (1 << 12) | |
138 | #define LPC_MAC2_BACK_PRESSURE (1 << 13) | |
139 | #define LPC_MAC2_EXCESS_DEFER (1 << 14) | |
140 | ||
141 | /* | |
142 | * ipgt register definitions | |
143 | */ | |
144 | #define LPC_IPGT_LOAD(n) ((n) & 0x7F) | |
145 | ||
146 | /* | |
147 | * ipgr register definitions | |
148 | */ | |
149 | #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F) | |
150 | #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8) | |
151 | ||
152 | /* | |
153 | * clrt register definitions | |
154 | */ | |
155 | #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF) | |
156 | #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8) | |
157 | ||
158 | /* | |
159 | * maxf register definitions | |
160 | */ | |
161 | #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF) | |
162 | ||
163 | /* | |
164 | * supp register definitions | |
165 | */ | |
166 | #define LPC_SUPP_SPEED (1 << 8) | |
167 | #define LPC_SUPP_RESET_RMII (1 << 11) | |
168 | ||
169 | /* | |
170 | * test register definitions | |
171 | */ | |
172 | #define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0) | |
173 | #define LPC_TEST_PAUSE (1 << 1) | |
174 | #define LPC_TEST_BACKPRESSURE (1 << 2) | |
175 | ||
176 | /* | |
177 | * mcfg register definitions | |
178 | */ | |
179 | #define LPC_MCFG_SCAN_INCREMENT (1 << 0) | |
180 | #define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1) | |
181 | #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2) | |
182 | #define LPC_MCFG_CLOCK_HOST_DIV_4 0 | |
183 | #define LPC_MCFG_CLOCK_HOST_DIV_6 2 | |
184 | #define LPC_MCFG_CLOCK_HOST_DIV_8 3 | |
185 | #define LPC_MCFG_CLOCK_HOST_DIV_10 4 | |
186 | #define LPC_MCFG_CLOCK_HOST_DIV_14 5 | |
187 | #define LPC_MCFG_CLOCK_HOST_DIV_20 6 | |
188 | #define LPC_MCFG_CLOCK_HOST_DIV_28 7 | |
189 | #define LPC_MCFG_RESET_MII_MGMT (1 << 15) | |
190 | ||
191 | /* | |
192 | * mcmd register definitions | |
193 | */ | |
194 | #define LPC_MCMD_READ (1 << 0) | |
195 | #define LPC_MCMD_SCAN (1 << 1) | |
196 | ||
197 | /* | |
198 | * madr register definitions | |
199 | */ | |
200 | #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F) | |
201 | #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8) | |
202 | ||
203 | /* | |
204 | * mwtd register definitions | |
205 | */ | |
206 | #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF) | |
207 | ||
208 | /* | |
209 | * mrdd register definitions | |
210 | */ | |
211 | #define LPC_MRDD_READ_MASK 0xFFFF | |
212 | ||
213 | /* | |
214 | * mind register definitions | |
215 | */ | |
216 | #define LPC_MIND_BUSY (1 << 0) | |
217 | #define LPC_MIND_SCANNING (1 << 1) | |
218 | #define LPC_MIND_NOT_VALID (1 << 2) | |
219 | #define LPC_MIND_MII_LINK_FAIL (1 << 3) | |
220 | ||
221 | /* | |
222 | * command register definitions | |
223 | */ | |
224 | #define LPC_COMMAND_RXENABLE (1 << 0) | |
225 | #define LPC_COMMAND_TXENABLE (1 << 1) | |
226 | #define LPC_COMMAND_REG_RESET (1 << 3) | |
227 | #define LPC_COMMAND_TXRESET (1 << 4) | |
228 | #define LPC_COMMAND_RXRESET (1 << 5) | |
229 | #define LPC_COMMAND_PASSRUNTFRAME (1 << 6) | |
230 | #define LPC_COMMAND_PASSRXFILTER (1 << 7) | |
231 | #define LPC_COMMAND_TXFLOWCONTROL (1 << 8) | |
232 | #define LPC_COMMAND_RMII (1 << 9) | |
233 | #define LPC_COMMAND_FULLDUPLEX (1 << 10) | |
234 | ||
235 | /* | |
236 | * status register definitions | |
237 | */ | |
238 | #define LPC_STATUS_RXACTIVE (1 << 0) | |
239 | #define LPC_STATUS_TXACTIVE (1 << 1) | |
240 | ||
241 | /* | |
242 | * tsv0 register definitions | |
243 | */ | |
244 | #define LPC_TSV0_CRC_ERROR (1 << 0) | |
245 | #define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1) | |
246 | #define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2) | |
247 | #define LPC_TSV0_DONE (1 << 3) | |
248 | #define LPC_TSV0_MULTICAST (1 << 4) | |
249 | #define LPC_TSV0_BROADCAST (1 << 5) | |
250 | #define LPC_TSV0_PACKET_DEFER (1 << 6) | |
251 | #define LPC_TSV0_ESCESSIVE_DEFER (1 << 7) | |
252 | #define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8) | |
253 | #define LPC_TSV0_LATE_COLLISION (1 << 9) | |
254 | #define LPC_TSV0_GIANT (1 << 10) | |
255 | #define LPC_TSV0_UNDERRUN (1 << 11) | |
256 | #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF) | |
257 | #define LPC_TSV0_CONTROL_FRAME (1 << 28) | |
258 | #define LPC_TSV0_PAUSE (1 << 29) | |
259 | #define LPC_TSV0_BACKPRESSURE (1 << 30) | |
260 | #define LPC_TSV0_VLAN (1 << 31) | |
261 | ||
262 | /* | |
263 | * tsv1 register definitions | |
264 | */ | |
265 | #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF) | |
266 | #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF) | |
267 | ||
268 | /* | |
269 | * rsv register definitions | |
270 | */ | |
271 | #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF) | |
272 | #define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16) | |
273 | #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17) | |
274 | #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18) | |
275 | #define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19) | |
276 | #define LPC_RSV_CRC_ERROR (1 << 20) | |
277 | #define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21) | |
278 | #define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22) | |
279 | #define LPC_RSV_RECEIVE_OK (1 << 23) | |
280 | #define LPC_RSV_MULTICAST (1 << 24) | |
281 | #define LPC_RSV_BROADCAST (1 << 25) | |
282 | #define LPC_RSV_DRIBBLE_NIBBLE (1 << 26) | |
283 | #define LPC_RSV_CONTROL_FRAME (1 << 27) | |
284 | #define LPC_RSV_PAUSE (1 << 28) | |
285 | #define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29) | |
286 | #define LPC_RSV_VLAN (1 << 30) | |
287 | ||
288 | /* | |
289 | * flowcontrolcounter register definitions | |
290 | */ | |
291 | #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF) | |
292 | #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF) | |
293 | ||
294 | /* | |
295 | * flowcontrolstatus register definitions | |
296 | */ | |
297 | #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF) | |
298 | ||
299 | /* | |
300 | * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared | |
301 | * register definitions | |
302 | */ | |
303 | #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0) | |
304 | #define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1) | |
305 | #define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2) | |
306 | #define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3) | |
307 | #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4) | |
308 | #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5) | |
309 | ||
310 | /* | |
311 | * rxfliterctrl register definitions | |
312 | */ | |
313 | #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12) | |
314 | #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13) | |
315 | ||
316 | /* | |
317 | * rxfilterwolstatus/rxfilterwolclear register definitions | |
318 | */ | |
319 | #define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7) | |
320 | #define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8) | |
321 | ||
322 | /* | |
323 | * intstatus, intenable, intclear, and Intset shared register | |
324 | * definitions | |
325 | */ | |
326 | #define LPC_MACINT_RXOVERRUNINTEN (1 << 0) | |
327 | #define LPC_MACINT_RXERRORONINT (1 << 1) | |
328 | #define LPC_MACINT_RXFINISHEDINTEN (1 << 2) | |
329 | #define LPC_MACINT_RXDONEINTEN (1 << 3) | |
330 | #define LPC_MACINT_TXUNDERRUNINTEN (1 << 4) | |
331 | #define LPC_MACINT_TXERRORINTEN (1 << 5) | |
332 | #define LPC_MACINT_TXFINISHEDINTEN (1 << 6) | |
333 | #define LPC_MACINT_TXDONEINTEN (1 << 7) | |
334 | #define LPC_MACINT_SOFTINTEN (1 << 12) | |
335 | #define LPC_MACINT_WAKEUPINTEN (1 << 13) | |
336 | ||
337 | /* | |
338 | * powerdown register definitions | |
339 | */ | |
340 | #define LPC_POWERDOWN_MACAHB (1 << 31) | |
341 | ||
4de02e4a | 342 | static phy_interface_t lpc_phy_interface_mode(struct device *dev) |
b7370112 | 343 | { |
4de02e4a RS |
344 | if (dev && dev->of_node) { |
345 | const char *mode = of_get_property(dev->of_node, | |
346 | "phy-mode", NULL); | |
347 | if (mode && !strcmp(mode, "mii")) | |
348 | return PHY_INTERFACE_MODE_MII; | |
4de02e4a | 349 | } |
b7370112 | 350 | return PHY_INTERFACE_MODE_RMII; |
b7370112 | 351 | } |
352 | ||
4de02e4a | 353 | static bool use_iram_for_net(struct device *dev) |
b7370112 | 354 | { |
4de02e4a RS |
355 | if (dev && dev->of_node) |
356 | return of_property_read_bool(dev->of_node, "use-iram"); | |
4de02e4a | 357 | return false; |
b7370112 | 358 | } |
359 | ||
360 | /* Receive Status information word */ | |
361 | #define RXSTATUS_SIZE 0x000007FF | |
362 | #define RXSTATUS_CONTROL (1 << 18) | |
363 | #define RXSTATUS_VLAN (1 << 19) | |
364 | #define RXSTATUS_FILTER (1 << 20) | |
365 | #define RXSTATUS_MULTICAST (1 << 21) | |
366 | #define RXSTATUS_BROADCAST (1 << 22) | |
367 | #define RXSTATUS_CRC (1 << 23) | |
368 | #define RXSTATUS_SYMBOL (1 << 24) | |
369 | #define RXSTATUS_LENGTH (1 << 25) | |
370 | #define RXSTATUS_RANGE (1 << 26) | |
371 | #define RXSTATUS_ALIGN (1 << 27) | |
372 | #define RXSTATUS_OVERRUN (1 << 28) | |
373 | #define RXSTATUS_NODESC (1 << 29) | |
374 | #define RXSTATUS_LAST (1 << 30) | |
375 | #define RXSTATUS_ERROR (1 << 31) | |
376 | ||
377 | #define RXSTATUS_STATUS_ERROR \ | |
378 | (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \ | |
379 | RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC) | |
380 | ||
381 | /* Receive Descriptor control word */ | |
382 | #define RXDESC_CONTROL_SIZE 0x000007FF | |
383 | #define RXDESC_CONTROL_INT (1 << 31) | |
384 | ||
385 | /* Transmit Status information word */ | |
386 | #define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF) | |
387 | #define TXSTATUS_DEFER (1 << 25) | |
388 | #define TXSTATUS_EXCESSDEFER (1 << 26) | |
389 | #define TXSTATUS_EXCESSCOLL (1 << 27) | |
390 | #define TXSTATUS_LATECOLL (1 << 28) | |
391 | #define TXSTATUS_UNDERRUN (1 << 29) | |
392 | #define TXSTATUS_NODESC (1 << 30) | |
393 | #define TXSTATUS_ERROR (1 << 31) | |
394 | ||
395 | /* Transmit Descriptor control word */ | |
396 | #define TXDESC_CONTROL_SIZE 0x000007FF | |
397 | #define TXDESC_CONTROL_OVERRIDE (1 << 26) | |
398 | #define TXDESC_CONTROL_HUGE (1 << 27) | |
399 | #define TXDESC_CONTROL_PAD (1 << 28) | |
400 | #define TXDESC_CONTROL_CRC (1 << 29) | |
401 | #define TXDESC_CONTROL_LAST (1 << 30) | |
402 | #define TXDESC_CONTROL_INT (1 << 31) | |
403 | ||
/*
 * Structure of a TX/RX descriptors and RX status
 */
struct txrx_desc_t {
	__le32 packet;		/* bus address of the packet buffer */
	__le32 control;		/* control word: buffer size plus
				 * TXDESC_CONTROL_* / RXDESC_CONTROL_* flags */
};
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags plus received frame size */
	__le32 statushashcrc;	/* hash CRC written by hardware -
				 * NOTE(review): only zeroed in this chunk;
				 * confirm semantics against the LPC32xx manual */
};
415 | ||
/*
 * Device driver data structure
 */
struct netdata_local {
	struct platform_device *pdev;		/* owning platform device */
	struct net_device *ndev;		/* associated network device */
	spinlock_t lock;			/* guards link/speed/duplex
						 * (see lpc_handle_link_change) */
	void __iomem *net_base;			/* mapped MAC register base,
						 * used by the LPC_ENET_* macros */
	u32 msg_enable;				/* netif message level -
						 * NOTE(review): no user visible
						 * in this chunk; verify */
	unsigned int skblen[ENET_TX_DESC];	/* per-TX-slot skb data length */
	unsigned int last_tx_idx;		/* next TX status slot to reap */
	unsigned int num_used_tx_buffs;		/* TX descriptors in flight */
	struct mii_bus *mii_bus;		/* MDIO bus for PHY access */
	struct phy_device *phy_dev;		/* attached PHY */
	struct clk *clk;			/* MAC peripheral clock */
	dma_addr_t dma_buff_base_p;		/* bus address of DMA region */
	void *dma_buff_base_v;			/* CPU address of DMA region */
	size_t dma_buff_size;			/* size of DMA region */
	struct txrx_desc_t *tx_desc_v;		/* TX descriptor array (virt) */
	u32 *tx_stat_v;				/* TX status words (virt) */
	void *tx_buff_v;			/* TX packet buffers (virt) */
	struct txrx_desc_t *rx_desc_v;		/* RX descriptor array (virt) */
	struct rx_status_t *rx_stat_v;		/* RX status entries (virt) */
	void *rx_buff_v;			/* RX packet buffers (virt) */
	int link;				/* cached PHY link state */
	int speed;				/* cached PHY speed (0 = down) */
	int duplex;				/* cached duplex (-1 = unknown) */
	struct napi_struct napi;		/* NAPI polling context */
};
445 | ||
446 | /* | |
447 | * MAC support functions | |
448 | */ | |
449 | static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) | |
450 | { | |
451 | u32 tmp; | |
452 | ||
453 | /* Set station address */ | |
454 | tmp = mac[0] | ((u32)mac[1] << 8); | |
455 | writel(tmp, LPC_ENET_SA2(pldat->net_base)); | |
456 | tmp = mac[2] | ((u32)mac[3] << 8); | |
457 | writel(tmp, LPC_ENET_SA1(pldat->net_base)); | |
458 | tmp = mac[4] | ((u32)mac[5] << 8); | |
459 | writel(tmp, LPC_ENET_SA0(pldat->net_base)); | |
460 | ||
461 | netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac); | |
462 | } | |
463 | ||
464 | static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) | |
465 | { | |
466 | u32 tmp; | |
467 | ||
468 | /* Get station address */ | |
469 | tmp = readl(LPC_ENET_SA2(pldat->net_base)); | |
470 | mac[0] = tmp & 0xFF; | |
471 | mac[1] = tmp >> 8; | |
472 | tmp = readl(LPC_ENET_SA1(pldat->net_base)); | |
473 | mac[2] = tmp & 0xFF; | |
474 | mac[3] = tmp >> 8; | |
475 | tmp = readl(LPC_ENET_SA0(pldat->net_base)); | |
476 | mac[4] = tmp & 0xFF; | |
477 | mac[5] = tmp >> 8; | |
478 | } | |
479 | ||
480 | static void __lpc_eth_clock_enable(struct netdata_local *pldat, | |
481 | bool enable) | |
482 | { | |
483 | if (enable) | |
484 | clk_enable(pldat->clk); | |
485 | else | |
486 | clk_disable(pldat->clk); | |
487 | } | |
488 | ||
489 | static void __lpc_params_setup(struct netdata_local *pldat) | |
490 | { | |
491 | u32 tmp; | |
492 | ||
493 | if (pldat->duplex == DUPLEX_FULL) { | |
494 | tmp = readl(LPC_ENET_MAC2(pldat->net_base)); | |
495 | tmp |= LPC_MAC2_FULL_DUPLEX; | |
496 | writel(tmp, LPC_ENET_MAC2(pldat->net_base)); | |
497 | tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); | |
498 | tmp |= LPC_COMMAND_FULLDUPLEX; | |
499 | writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); | |
500 | writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base)); | |
501 | } else { | |
502 | tmp = readl(LPC_ENET_MAC2(pldat->net_base)); | |
503 | tmp &= ~LPC_MAC2_FULL_DUPLEX; | |
504 | writel(tmp, LPC_ENET_MAC2(pldat->net_base)); | |
505 | tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); | |
506 | tmp &= ~LPC_COMMAND_FULLDUPLEX; | |
507 | writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); | |
508 | writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base)); | |
509 | } | |
510 | ||
511 | if (pldat->speed == SPEED_100) | |
512 | writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base)); | |
513 | else | |
514 | writel(0, LPC_ENET_SUPP(pldat->net_base)); | |
515 | } | |
516 | ||
517 | static void __lpc_eth_reset(struct netdata_local *pldat) | |
518 | { | |
519 | /* Reset all MAC logic */ | |
520 | writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX | | |
521 | LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET | | |
522 | LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base)); | |
523 | writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET | | |
524 | LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base)); | |
525 | } | |
526 | ||
527 | static int __lpc_mii_mngt_reset(struct netdata_local *pldat) | |
528 | { | |
529 | /* Reset MII management hardware */ | |
530 | writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base)); | |
531 | ||
532 | /* Setup MII clock to slowest rate with a /28 divider */ | |
533 | writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28), | |
534 | LPC_ENET_MCFG(pldat->net_base)); | |
535 | ||
536 | return 0; | |
537 | } | |
538 | ||
539 | static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat) | |
540 | { | |
541 | phys_addr_t phaddr; | |
542 | ||
543 | phaddr = addr - pldat->dma_buff_base_v; | |
544 | phaddr += pldat->dma_buff_base_p; | |
545 | ||
546 | return phaddr; | |
547 | } | |
548 | ||
549 | static void lpc_eth_enable_int(void __iomem *regbase) | |
550 | { | |
551 | writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN), | |
552 | LPC_ENET_INTENABLE(regbase)); | |
553 | } | |
554 | ||
/* Mask every MAC interrupt source. */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
559 | ||
560 | /* Setup TX/RX descriptors */ | |
/* Setup TX/RX descriptors.
 *
 * Carves the single preallocated DMA region (dma_buff_base_v) into
 * consecutive, 16-byte-aligned sub-areas in this fixed order:
 * TX descriptors, TX status words, TX buffers, RX descriptors,
 * RX status entries, RX buffers.  Each descriptor is then pointed
 * at its buffer (bus address via __va_to_pa) and the ring base
 * addresses/sizes are programmed into the MAC.  The carving order
 * and the running "tbuff" pointer must not be reordered.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		/* TX control word is filled in when a frame is queued */
		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		/* Interrupt per received frame; size field is "size - 1" */
		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
633 | ||
634 | static void __lpc_eth_init(struct netdata_local *pldat) | |
635 | { | |
636 | u32 tmp; | |
637 | ||
638 | /* Disable controller and reset */ | |
639 | tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); | |
640 | tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; | |
641 | writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); | |
642 | tmp = readl(LPC_ENET_MAC1(pldat->net_base)); | |
643 | tmp &= ~LPC_MAC1_RECV_ENABLE; | |
644 | writel(tmp, LPC_ENET_MAC1(pldat->net_base)); | |
645 | ||
646 | /* Initial MAC setup */ | |
647 | writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base)); | |
648 | writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE), | |
649 | LPC_ENET_MAC2(pldat->net_base)); | |
650 | writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base)); | |
651 | ||
652 | /* Collision window, gap */ | |
653 | writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) | | |
654 | LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)), | |
655 | LPC_ENET_CLRT(pldat->net_base)); | |
656 | writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base)); | |
657 | ||
4de02e4a | 658 | if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) |
b7370112 | 659 | writel(LPC_COMMAND_PASSRUNTFRAME, |
660 | LPC_ENET_COMMAND(pldat->net_base)); | |
661 | else { | |
662 | writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), | |
663 | LPC_ENET_COMMAND(pldat->net_base)); | |
664 | writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); | |
665 | } | |
666 | ||
667 | __lpc_params_setup(pldat); | |
668 | ||
669 | /* Setup TX and RX descriptors */ | |
670 | __lpc_txrx_desc_setup(pldat); | |
671 | ||
672 | /* Setup packet filtering */ | |
673 | writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT), | |
674 | LPC_ENET_RXFILTER_CTRL(pldat->net_base)); | |
675 | ||
676 | /* Get the next TX buffer output index */ | |
677 | pldat->num_used_tx_buffs = 0; | |
678 | pldat->last_tx_idx = | |
679 | readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); | |
680 | ||
681 | /* Clear and enable interrupts */ | |
682 | writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base)); | |
683 | smp_wmb(); | |
684 | lpc_eth_enable_int(pldat->net_base); | |
685 | ||
686 | /* Enable controller */ | |
687 | tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); | |
688 | tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; | |
689 | writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); | |
690 | tmp = readl(LPC_ENET_MAC1(pldat->net_base)); | |
691 | tmp |= LPC_MAC1_RECV_ENABLE; | |
692 | writel(tmp, LPC_ENET_MAC1(pldat->net_base)); | |
693 | } | |
694 | ||
/* Quiesce the MAC: assert all reset bits, then zero the MAC1/MAC2
 * configuration registers.  (The original comment mentions PHY power
 * down, but no PHY register is touched here - presumably handled by
 * the caller; verify.)
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
702 | ||
703 | /* | |
704 | * MAC<--->PHY support functions | |
705 | */ | |
706 | static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg) | |
707 | { | |
708 | struct netdata_local *pldat = bus->priv; | |
709 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | |
710 | int lps; | |
711 | ||
712 | writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base)); | |
713 | writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base)); | |
714 | ||
715 | /* Wait for unbusy status */ | |
716 | while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) { | |
717 | if (time_after(jiffies, timeout)) | |
718 | return -EIO; | |
719 | cpu_relax(); | |
720 | } | |
721 | ||
722 | lps = readl(LPC_ENET_MRDD(pldat->net_base)); | |
723 | writel(0, LPC_ENET_MCMD(pldat->net_base)); | |
724 | ||
725 | return lps; | |
726 | } | |
727 | ||
728 | static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg, | |
729 | u16 phydata) | |
730 | { | |
731 | struct netdata_local *pldat = bus->priv; | |
732 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | |
733 | ||
734 | writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base)); | |
735 | writel(phydata, LPC_ENET_MWTD(pldat->net_base)); | |
736 | ||
737 | /* Wait for completion */ | |
738 | while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) { | |
739 | if (time_after(jiffies, timeout)) | |
740 | return -EIO; | |
741 | cpu_relax(); | |
742 | } | |
743 | ||
744 | return 0; | |
745 | } | |
746 | ||
747 | static int lpc_mdio_reset(struct mii_bus *bus) | |
748 | { | |
749 | return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv); | |
750 | } | |
751 | ||
752 | static void lpc_handle_link_change(struct net_device *ndev) | |
753 | { | |
754 | struct netdata_local *pldat = netdev_priv(ndev); | |
755 | struct phy_device *phydev = pldat->phy_dev; | |
756 | unsigned long flags; | |
757 | ||
758 | bool status_change = false; | |
759 | ||
760 | spin_lock_irqsave(&pldat->lock, flags); | |
761 | ||
762 | if (phydev->link) { | |
763 | if ((pldat->speed != phydev->speed) || | |
764 | (pldat->duplex != phydev->duplex)) { | |
765 | pldat->speed = phydev->speed; | |
766 | pldat->duplex = phydev->duplex; | |
767 | status_change = true; | |
768 | } | |
769 | } | |
770 | ||
771 | if (phydev->link != pldat->link) { | |
772 | if (!phydev->link) { | |
773 | pldat->speed = 0; | |
774 | pldat->duplex = -1; | |
775 | } | |
776 | pldat->link = phydev->link; | |
777 | ||
778 | status_change = true; | |
779 | } | |
780 | ||
781 | spin_unlock_irqrestore(&pldat->lock, flags); | |
782 | ||
783 | if (status_change) | |
784 | __lpc_params_setup(pldat); | |
785 | } | |
786 | ||
787 | static int lpc_mii_probe(struct net_device *ndev) | |
788 | { | |
789 | struct netdata_local *pldat = netdev_priv(ndev); | |
790 | struct phy_device *phydev = phy_find_first(pldat->mii_bus); | |
791 | ||
792 | if (!phydev) { | |
793 | netdev_err(ndev, "no PHY found\n"); | |
794 | return -ENODEV; | |
795 | } | |
796 | ||
797 | /* Attach to the PHY */ | |
4de02e4a | 798 | if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) |
b7370112 | 799 | netdev_info(ndev, "using MII interface\n"); |
800 | else | |
801 | netdev_info(ndev, "using RMII interface\n"); | |
802 | phydev = phy_connect(ndev, dev_name(&phydev->dev), | |
f9a8f83b | 803 | &lpc_handle_link_change, |
4de02e4a | 804 | lpc_phy_interface_mode(&pldat->pdev->dev)); |
b7370112 | 805 | |
806 | if (IS_ERR(phydev)) { | |
807 | netdev_err(ndev, "Could not attach to PHY\n"); | |
808 | return PTR_ERR(phydev); | |
809 | } | |
810 | ||
811 | /* mask with MAC supported features */ | |
812 | phydev->supported &= PHY_BASIC_FEATURES; | |
813 | ||
814 | phydev->advertising = phydev->supported; | |
815 | ||
816 | pldat->link = 0; | |
817 | pldat->speed = 0; | |
818 | pldat->duplex = -1; | |
819 | pldat->phy_dev = phydev; | |
820 | ||
821 | netdev_info(ndev, | |
822 | "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", | |
823 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); | |
824 | return 0; | |
825 | } | |
826 | ||
/*
 * Allocate, configure and register the MDIO bus, then probe for the
 * PHY.  Returns 0 on success or a negative errno; all partially
 * acquired resources are released via the goto cleanup chain.
 */
static int lpc_mii_init(struct netdata_local *pldat)
{
	int err = -ENXIO, i;

	pldat->mii_bus = mdiobus_alloc();
	if (!pldat->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	/* Setup MII mode (RMII needs the RMII bit and a support-block
	 * reset in addition to runt-frame pass-through) */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		writel(LPC_COMMAND_PASSRUNTFRAME,
		       LPC_ENET_COMMAND(pldat->net_base));
	else {
		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
		       LPC_ENET_COMMAND(pldat->net_base));
		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
	}

	/* Wire the bus to the lpc_mdio_* accessors above */
	pldat->mii_bus->name = "lpc_mii_bus";
	pldat->mii_bus->read = &lpc_mdio_read;
	pldat->mii_bus->write = &lpc_mdio_write;
	pldat->mii_bus->reset = &lpc_mdio_reset;
	snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pldat->pdev->name, pldat->pdev->id);
	pldat->mii_bus->priv = pldat;
	pldat->mii_bus->parent = &pldat->pdev->dev;

	pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!pldat->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_1;
	}

	/* No PHY interrupt lines wired up - poll every address */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		pldat->mii_bus->irq[i] = PHY_POLL;

	platform_set_drvdata(pldat->pdev, pldat->mii_bus);

	/* Registration/probe failures fall through with err == -ENXIO */
	if (mdiobus_register(pldat->mii_bus))
		goto err_out_free_mdio_irq;

	if (lpc_mii_probe(pldat->ndev) != 0)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(pldat->mii_bus);
err_out_free_mdio_irq:
	kfree(pldat->mii_bus->irq);
err_out_1:
	mdiobus_free(pldat->mii_bus);
err_out:
	return err;
}
884 | ||
/*
 * __lpc_handle_xmit - reclaim completed TX descriptors
 * @ndev: network device
 *
 * Called with the TX queue lock held (see lpc_eth_poll).  Walks the TX
 * ring from the driver's last-seen index up to the hardware consume
 * index, accounting statistics for each completed buffer and waking
 * the queue once at least half the descriptors are free again.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		/* Byte count saved at xmit time; skb itself was already freed */
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: hardware may have completed more buffers meanwhile */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
940 | ||
/*
 * __lpc_handle_recv - process received frames (NAPI context)
 * @ndev:   network device
 * @budget: maximum number of descriptors to process this call
 *
 * Walks the RX ring from the driver's consume index up to the
 * hardware's produce index.  Each good frame is copied out of the
 * fixed DMA buffer into a freshly allocated skb and handed to the
 * stack; errored frames only update statistics.  The consume index
 * register is advanced after every descriptor, returning it to the
 * hardware.
 *
 * Return: number of descriptors processed (delivered, dropped or
 * errored), which NAPI compares against @budget.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		/*
		 * A RANGE error accompanied only by the generic error bit is
		 * deliberately not treated as fatal: mask the error bit off
		 * so the frame is still delivered.
		 */
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				/* No memory: count the drop but still recycle
				   the descriptor below */
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
1012 | ||
/*
 * lpc_eth_poll - NAPI poll callback
 * @napi:   embedded napi_struct (inside netdata_local)
 * @budget: maximum RX packets to process
 *
 * Reclaims TX completions under the TX queue lock, then processes up
 * to @budget received frames.  If the budget was not exhausted, NAPI
 * is completed and the controller interrupt is re-enabled.
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	/* TX reclaim races with the xmit path, so take the queue lock */
	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
1033 | ||
/*
 * __lpc_eth_interrupt - MAC interrupt handler
 * @irq:    interrupt number (unused)
 * @dev_id: net_device registered with request_irq()
 *
 * Acknowledges all pending interrupt causes, masks further interrupts
 * and defers the actual TX/RX work to NAPI (lpc_eth_poll), which will
 * re-enable interrupts when done.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Interrupts stay disabled until the NAPI poll completes */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1054 | ||
/*
 * lpc_eth_close - ndo_stop callback; shut the interface down
 * @ndev: network device
 *
 * Stops NAPI and the TX queue, stops the PHY state machine, resets
 * the MAC under the driver lock and finally gates the clock to save
 * power.  Always returns 0.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (pldat->phy_dev)
		phy_stop(pldat->phy_dev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	/* Disable the MAC completely (MAC1/MAC2 cleared) */
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	__lpc_eth_clock_enable(pldat, false);

	return 0;
}
1080 | ||
/*
 * lpc_eth_hard_start_xmit - ndo_start_xmit callback
 * @skb:  socket buffer to transmit
 * @ndev: network device
 *
 * Copies the frame into the fixed per-descriptor DMA buffer, records
 * its length for later statistics (see __lpc_handle_xmit), advances
 * the hardware produce index, and frees the skb immediately since the
 * data has already been copied out.
 *
 * Return: NETDEV_TX_OK, or NETDEV_TX_BUSY if (unexpectedly) called
 * with no free descriptors.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	/* The interrupt handler also takes this lock */
	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit: writing the produce index hands the
	   descriptor to the hardware */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	/* Data was copied, so the skb is no longer needed */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1133 | ||
1134 | static int lpc_set_mac_address(struct net_device *ndev, void *p) | |
1135 | { | |
1136 | struct sockaddr *addr = p; | |
1137 | struct netdata_local *pldat = netdev_priv(ndev); | |
1138 | unsigned long flags; | |
1139 | ||
1140 | if (!is_valid_ether_addr(addr->sa_data)) | |
1141 | return -EADDRNOTAVAIL; | |
1142 | memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); | |
1143 | ||
1144 | spin_lock_irqsave(&pldat->lock, flags); | |
1145 | ||
1146 | /* Set station address */ | |
1147 | __lpc_set_mac(pldat, ndev->dev_addr); | |
1148 | ||
1149 | spin_unlock_irqrestore(&pldat->lock, flags); | |
1150 | ||
1151 | return 0; | |
1152 | } | |
1153 | ||
/*
 * lpc_eth_set_multicast_list - ndo_set_rx_mode callback
 * @ndev: network device
 *
 * Reprograms the station address, the RX filter control word
 * (promiscuous / all-multicast / hash-multicast acceptance based on
 * interface flags) and the 64-bit multicast hash filter from the
 * current multicast list.  Runs entirely under the driver lock.
 */
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	/* Always accept broadcast and perfect-match unicast */
	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));


	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		/* 6-bit hash taken from the Ethernet CRC of the address */
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}
1200 | ||
1201 | static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) | |
1202 | { | |
1203 | struct netdata_local *pldat = netdev_priv(ndev); | |
1204 | struct phy_device *phydev = pldat->phy_dev; | |
1205 | ||
1206 | if (!netif_running(ndev)) | |
1207 | return -EINVAL; | |
1208 | ||
1209 | if (!phydev) | |
1210 | return -ENODEV; | |
1211 | ||
1212 | return phy_mii_ioctl(phydev, req, cmd); | |
1213 | } | |
1214 | ||
/*
 * lpc_eth_open - ndo_open callback; bring the interface up
 * @ndev: network device
 *
 * Ungates the clock, resets and reinitializes the MAC, then starts
 * the PHY state machine, the TX queue and NAPI.  Always returns 0.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	__lpc_eth_clock_enable(pldat, true);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(pldat->phy_dev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1235 | ||
1236 | /* | |
1237 | * Ethtool ops | |
1238 | */ | |
1239 | static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev, | |
1240 | struct ethtool_drvinfo *info) | |
1241 | { | |
7826d43f JP |
1242 | strlcpy(info->driver, MODNAME, sizeof(info->driver)); |
1243 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); | |
1244 | strlcpy(info->bus_info, dev_name(ndev->dev.parent), | |
1245 | sizeof(info->bus_info)); | |
b7370112 | 1246 | } |
1247 | ||
1248 | static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev) | |
1249 | { | |
1250 | struct netdata_local *pldat = netdev_priv(ndev); | |
1251 | ||
1252 | return pldat->msg_enable; | |
1253 | } | |
1254 | ||
1255 | static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level) | |
1256 | { | |
1257 | struct netdata_local *pldat = netdev_priv(ndev); | |
1258 | ||
1259 | pldat->msg_enable = level; | |
1260 | } | |
1261 | ||
1262 | static int lpc_eth_ethtool_getsettings(struct net_device *ndev, | |
1263 | struct ethtool_cmd *cmd) | |
1264 | { | |
1265 | struct netdata_local *pldat = netdev_priv(ndev); | |
1266 | struct phy_device *phydev = pldat->phy_dev; | |
1267 | ||
1268 | if (!phydev) | |
1269 | return -EOPNOTSUPP; | |
1270 | ||
1271 | return phy_ethtool_gset(phydev, cmd); | |
1272 | } | |
1273 | ||
1274 | static int lpc_eth_ethtool_setsettings(struct net_device *ndev, | |
1275 | struct ethtool_cmd *cmd) | |
1276 | { | |
1277 | struct netdata_local *pldat = netdev_priv(ndev); | |
1278 | struct phy_device *phydev = pldat->phy_dev; | |
1279 | ||
1280 | if (!phydev) | |
1281 | return -EOPNOTSUPP; | |
1282 | ||
1283 | return phy_ethtool_sset(phydev, cmd); | |
1284 | } | |
1285 | ||
/* ethtool operations: drvinfo, link settings, msglevel, link status. */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
	.get_settings	= lpc_eth_ethtool_getsettings,
	.set_settings	= lpc_eth_ethtool_setsettings,
	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
	.get_link	= ethtool_op_get_link,
};
1294 | ||
/*
 * net_device operations.  Address validation and MTU changes are
 * delegated to the generic ethernet helpers.
 */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_do_ioctl		= lpc_eth_ioctl,
	.ndo_set_mac_address	= lpc_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
1305 | ||
1306 | static int lpc_eth_drv_probe(struct platform_device *pdev) | |
1307 | { | |
1308 | struct resource *res; | |
b7370112 | 1309 | struct net_device *ndev; |
1310 | struct netdata_local *pldat; | |
1311 | struct phy_device *phydev; | |
1312 | dma_addr_t dma_handle; | |
1313 | int irq, ret; | |
4de02e4a RS |
1314 | u32 tmp; |
1315 | ||
1316 | /* Setup network interface for RMII or MII mode */ | |
1317 | tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL); | |
1318 | tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK; | |
1319 | if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII) | |
1320 | tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS; | |
1321 | else | |
1322 | tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS; | |
1323 | __raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL); | |
b7370112 | 1324 | |
1325 | /* Get platform resources */ | |
1326 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
b7370112 | 1327 | irq = platform_get_irq(pdev, 0); |
4de02e4a | 1328 | if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { |
b7370112 | 1329 | dev_err(&pdev->dev, "error getting resources.\n"); |
1330 | ret = -ENXIO; | |
1331 | goto err_exit; | |
1332 | } | |
1333 | ||
1334 | /* Allocate net driver data structure */ | |
1335 | ndev = alloc_etherdev(sizeof(struct netdata_local)); | |
1336 | if (!ndev) { | |
1337 | dev_err(&pdev->dev, "could not allocate device.\n"); | |
1338 | ret = -ENOMEM; | |
1339 | goto err_exit; | |
1340 | } | |
1341 | ||
1342 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
1343 | ||
1344 | pldat = netdev_priv(ndev); | |
1345 | pldat->pdev = pdev; | |
1346 | pldat->ndev = ndev; | |
1347 | ||
1348 | spin_lock_init(&pldat->lock); | |
1349 | ||
1350 | /* Save resources */ | |
1351 | ndev->irq = irq; | |
1352 | ||
1353 | /* Get clock for the device */ | |
1354 | pldat->clk = clk_get(&pdev->dev, NULL); | |
1355 | if (IS_ERR(pldat->clk)) { | |
1356 | dev_err(&pdev->dev, "error getting clock.\n"); | |
1357 | ret = PTR_ERR(pldat->clk); | |
1358 | goto err_out_free_dev; | |
1359 | } | |
1360 | ||
1361 | /* Enable network clock */ | |
1362 | __lpc_eth_clock_enable(pldat, true); | |
1363 | ||
1364 | /* Map IO space */ | |
1365 | pldat->net_base = ioremap(res->start, res->end - res->start + 1); | |
1366 | if (!pldat->net_base) { | |
1367 | dev_err(&pdev->dev, "failed to map registers\n"); | |
1368 | ret = -ENOMEM; | |
1369 | goto err_out_disable_clocks; | |
1370 | } | |
1371 | ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0, | |
1372 | ndev->name, ndev); | |
1373 | if (ret) { | |
1374 | dev_err(&pdev->dev, "error requesting interrupt.\n"); | |
1375 | goto err_out_iounmap; | |
1376 | } | |
1377 | ||
1378 | /* Fill in the fields of the device structure with ethernet values. */ | |
1379 | ether_setup(ndev); | |
1380 | ||
1381 | /* Setup driver functions */ | |
1382 | ndev->netdev_ops = &lpc_netdev_ops; | |
1383 | ndev->ethtool_ops = &lpc_eth_ethtool_ops; | |
1384 | ndev->watchdog_timeo = msecs_to_jiffies(2500); | |
1385 | ||
1386 | /* Get size of DMA buffers/descriptors region */ | |
1387 | pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE + | |
1388 | sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t)); | |
1389 | pldat->dma_buff_base_v = 0; | |
1390 | ||
4de02e4a RS |
1391 | if (use_iram_for_net(&pldat->pdev->dev)) { |
1392 | dma_handle = LPC32XX_IRAM_BASE; | |
b7370112 | 1393 | if (pldat->dma_buff_size <= lpc32xx_return_iram_size()) |
1394 | pldat->dma_buff_base_v = | |
4de02e4a | 1395 | io_p2v(LPC32XX_IRAM_BASE); |
b7370112 | 1396 | else |
1397 | netdev_err(ndev, | |
1398 | "IRAM not big enough for net buffers, using SDRAM instead.\n"); | |
1399 | } | |
1400 | ||
1401 | if (pldat->dma_buff_base_v == 0) { | |
4de02e4a RS |
1402 | pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF; |
1403 | pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask; | |
b7370112 | 1404 | pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); |
1405 | ||
1406 | /* Allocate a chunk of memory for the DMA ethernet buffers | |
1407 | and descriptors */ | |
1408 | pldat->dma_buff_base_v = | |
1409 | dma_alloc_coherent(&pldat->pdev->dev, | |
1410 | pldat->dma_buff_size, &dma_handle, | |
1411 | GFP_KERNEL); | |
b7370112 | 1412 | if (pldat->dma_buff_base_v == NULL) { |
b7370112 | 1413 | ret = -ENOMEM; |
1414 | goto err_out_free_irq; | |
1415 | } | |
1416 | } | |
1417 | pldat->dma_buff_base_p = dma_handle; | |
1418 | ||
1419 | netdev_dbg(ndev, "IO address start :0x%08x\n", | |
1420 | res->start); | |
1421 | netdev_dbg(ndev, "IO address size :%d\n", | |
1422 | res->end - res->start + 1); | |
b31525d1 | 1423 | netdev_dbg(ndev, "IO address (mapped) :0x%p\n", |
b7370112 | 1424 | pldat->net_base); |
1425 | netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); | |
1426 | netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); | |
1427 | netdev_dbg(ndev, "DMA buffer P address :0x%08x\n", | |
1428 | pldat->dma_buff_base_p); | |
1429 | netdev_dbg(ndev, "DMA buffer V address :0x%p\n", | |
1430 | pldat->dma_buff_base_v); | |
1431 | ||
1432 | /* Get MAC address from current HW setting (POR state is all zeros) */ | |
1433 | __lpc_get_mac(pldat, ndev->dev_addr); | |
1434 | ||
b7370112 | 1435 | if (!is_valid_ether_addr(ndev->dev_addr)) { |
1436 | const char *macaddr = of_get_mac_address(pdev->dev.of_node); | |
1437 | if (macaddr) | |
1438 | memcpy(ndev->dev_addr, macaddr, ETH_ALEN); | |
1439 | } | |
b7370112 | 1440 | if (!is_valid_ether_addr(ndev->dev_addr)) |
cdaf0b83 | 1441 | eth_hw_addr_random(ndev); |
b7370112 | 1442 | |
1443 | /* Reset the ethernet controller */ | |
1444 | __lpc_eth_reset(pldat); | |
1445 | ||
1446 | /* then shut everything down to save power */ | |
1447 | __lpc_eth_shutdown(pldat); | |
1448 | ||
1449 | /* Set default parameters */ | |
1450 | pldat->msg_enable = NETIF_MSG_LINK; | |
1451 | ||
1452 | /* Force an MII interface reset and clock setup */ | |
1453 | __lpc_mii_mngt_reset(pldat); | |
1454 | ||
1455 | /* Force default PHY interface setup in chip, this will probably be | |
1456 | changed by the PHY driver */ | |
1457 | pldat->link = 0; | |
1458 | pldat->speed = 100; | |
1459 | pldat->duplex = DUPLEX_FULL; | |
1460 | __lpc_params_setup(pldat); | |
1461 | ||
1462 | netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT); | |
1463 | ||
1464 | ret = register_netdev(ndev); | |
1465 | if (ret) { | |
1466 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | |
1467 | goto err_out_dma_unmap; | |
1468 | } | |
1469 | platform_set_drvdata(pdev, ndev); | |
1470 | ||
fa90b077 WY |
1471 | ret = lpc_mii_init(pldat); |
1472 | if (ret) | |
b7370112 | 1473 | goto err_out_unregister_netdev; |
1474 | ||
1475 | netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", | |
1476 | res->start, ndev->irq); | |
1477 | ||
1478 | phydev = pldat->phy_dev; | |
1479 | ||
1480 | device_init_wakeup(&pdev->dev, 1); | |
1481 | device_set_wakeup_enable(&pdev->dev, 0); | |
1482 | ||
1483 | return 0; | |
1484 | ||
1485 | err_out_unregister_netdev: | |
b7370112 | 1486 | unregister_netdev(ndev); |
1487 | err_out_dma_unmap: | |
4de02e4a | 1488 | if (!use_iram_for_net(&pldat->pdev->dev) || |
b7370112 | 1489 | pldat->dma_buff_size > lpc32xx_return_iram_size()) |
1490 | dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, | |
1491 | pldat->dma_buff_base_v, | |
1492 | pldat->dma_buff_base_p); | |
1493 | err_out_free_irq: | |
1494 | free_irq(ndev->irq, ndev); | |
1495 | err_out_iounmap: | |
1496 | iounmap(pldat->net_base); | |
1497 | err_out_disable_clocks: | |
1498 | clk_disable(pldat->clk); | |
1499 | clk_put(pldat->clk); | |
1500 | err_out_free_dev: | |
1501 | free_netdev(ndev); | |
1502 | err_exit: | |
1503 | pr_err("%s: not found (%d).\n", MODNAME, ret); | |
1504 | return ret; | |
1505 | } | |
1506 | ||
/*
 * lpc_eth_drv_remove - platform remove; tear down in reverse of probe
 * @pdev: platform device
 *
 * Unregisters the net_device first so no new traffic arrives, then
 * releases the DMA region (unless it lives in IRAM), IRQ, register
 * mapping, MDIO bus, clock, and finally the net_device itself.
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	/* Only free if the region came from dma_alloc_coherent(), not IRAM */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1529 | ||
1530 | #ifdef CONFIG_PM | |
1531 | static int lpc_eth_drv_suspend(struct platform_device *pdev, | |
1532 | pm_message_t state) | |
1533 | { | |
1534 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1535 | struct netdata_local *pldat = netdev_priv(ndev); | |
1536 | ||
1537 | if (device_may_wakeup(&pdev->dev)) | |
1538 | enable_irq_wake(ndev->irq); | |
1539 | ||
1540 | if (ndev) { | |
1541 | if (netif_running(ndev)) { | |
1542 | netif_device_detach(ndev); | |
1543 | __lpc_eth_shutdown(pldat); | |
1544 | clk_disable(pldat->clk); | |
1545 | ||
1546 | /* | |
1547 | * Reset again now clock is disable to be sure | |
1548 | * EMC_MDC is down | |
1549 | */ | |
1550 | __lpc_eth_reset(pldat); | |
1551 | } | |
1552 | } | |
1553 | ||
1554 | return 0; | |
1555 | } | |
1556 | ||
1557 | static int lpc_eth_drv_resume(struct platform_device *pdev) | |
1558 | { | |
1559 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1560 | struct netdata_local *pldat; | |
1561 | ||
1562 | if (device_may_wakeup(&pdev->dev)) | |
1563 | disable_irq_wake(ndev->irq); | |
1564 | ||
1565 | if (ndev) { | |
1566 | if (netif_running(ndev)) { | |
1567 | pldat = netdev_priv(ndev); | |
1568 | ||
1569 | /* Enable interface clock */ | |
1570 | clk_enable(pldat->clk); | |
1571 | ||
1572 | /* Reset and initialize */ | |
1573 | __lpc_eth_reset(pldat); | |
1574 | __lpc_eth_init(pldat); | |
1575 | ||
1576 | netif_device_attach(ndev); | |
1577 | } | |
1578 | } | |
1579 | ||
1580 | return 0; | |
1581 | } | |
1582 | #endif | |
1583 | ||
4de02e4a RS |
#ifdef CONFIG_OF
/* Device-tree match table: binds to "nxp,lpc-eth" nodes */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
#endif
1591 | ||
b7370112 | 1592 | static struct platform_driver lpc_eth_driver = { |
1593 | .probe = lpc_eth_drv_probe, | |
21524526 | 1594 | .remove = lpc_eth_drv_remove, |
b7370112 | 1595 | #ifdef CONFIG_PM |
1596 | .suspend = lpc_eth_drv_suspend, | |
1597 | .resume = lpc_eth_drv_resume, | |
1598 | #endif | |
1599 | .driver = { | |
1600 | .name = MODNAME, | |
4de02e4a | 1601 | .of_match_table = of_match_ptr(lpc_eth_match), |
b7370112 | 1602 | }, |
1603 | }; | |
1604 | ||
/* Standard module init/exit boilerplate generated from the driver struct */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");