net: don't grab a mutex within a timer context in gianfar
[deliverable/linux.git] / drivers / net / ibm_newemac / core.c
CommitLineData
1d3bb996
DG
1/*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
17cf803a
BH
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
1d3bb996
DG
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
27#include <linux/sched.h>
28#include <linux/string.h>
29#include <linux/errno.h>
30#include <linux/delay.h>
31#include <linux/types.h>
32#include <linux/pci.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/crc32.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#include <linux/bitops.h>
39#include <linux/workqueue.h>
283029d1 40#include <linux/of.h>
1d3bb996
DG
41
42#include <asm/processor.h>
43#include <asm/io.h>
44#include <asm/dma.h>
45#include <asm/uaccess.h>
0925ab5d
VB
46#include <asm/dcr.h>
47#include <asm/dcr-regs.h>
1d3bb996
DG
48
49#include "core.h"
50
51/*
52 * Lack of dma_unmap_???? calls is intentional.
53 *
54 * API-correct usage requires additional support state information to be
55 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56 * EMAC design (e.g. TX buffer passed from network stack can be split into
57 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58 * maintaining such information will add additional overhead.
59 * Current DMA API implementation for 4xx processors only ensures cache coherency
60 * and dma_unmap_???? routines are empty and are likely to stay this way.
61 * I decided to omit dma_unmap_??? calls because I don't want to add additional
62 * complexity just for the sake of following some abstract API, when it doesn't
63 * add any real benefit to the driver. I understand that this decision maybe
64 * controversial, but I really tried to make code API-correct and efficient
65 * at the same time and didn't come up with code I liked :(. --ebs
66 */
67
68#define DRV_NAME "emac"
69#define DRV_VERSION "3.54"
70#define DRV_DESC "PPC 4xx OCP EMAC driver"
71
72MODULE_DESCRIPTION(DRV_DESC);
73MODULE_AUTHOR
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75MODULE_LICENSE("GPL");
76
77/*
78 * PPC64 doesn't (yet) have a cacheable_memcpy
79 */
80#ifdef CONFIG_PPC64
81#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
82#endif
83
84/* minimum number of free TX descriptors required to wake up TX process */
85#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86
87/* If packet size is less than this number, we allocate small skb and copy packet
88 * contents into it instead of just sending original big skb up
89 */
90#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
91
92/* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
95 *
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98 * probably require in that case to have explicit PHY IDs in the device-tree
99 */
100static u32 busy_phy_map;
101static DEFINE_MUTEX(emac_phy_map_lock);
102
103/* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105 */
106static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107
108/* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
115 * cell_index.
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118 */
119
120#define EMAC_BOOT_LIST_SIZE 4
121static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122
123/* How long should I wait for dependent devices ? */
124#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
125
126/* I don't want to litter system log with timeout errors
127 * when we have brain-damaged PHY.
128 */
129static inline void emac_report_timeout_error(struct emac_instance *dev,
130 const char *error)
131{
11121e30
VB
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
1d3bb996
DG
136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
137}
138
11121e30
VB
139/* EMAC PHY clock workaround:
140 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
142 */
/* EMAC PHY clock workaround:
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock individually.
 * Switch this EMAC's RX clock to come from the TX clock (set the per-EMAC
 * ECS bit in SDR0_MFR).  No-op unless built with native DCR access.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151
/* Undo emac_rx_clk_tx(): clear this EMAC's ECS bit in SDR0_MFR so the
 * RX clock reverts to its default source.  No-op unless built with
 * native DCR access.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160
1d3bb996
DG
161/* PHY polling intervals */
162#define PHY_POLL_LINK_ON HZ
163#define PHY_POLL_LINK_OFF (HZ / 5)
164
165/* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167 */
168#define STOP_TIMEOUT_10 1230
169#define STOP_TIMEOUT_100 124
170#define STOP_TIMEOUT_1000 13
171#define STOP_TIMEOUT_1000_JUMBO 73
172
4373c932
PB
/* Reserved 802.3x multicast address that PAUSE (flow-control) frames are
 * addressed to.  It is added to the device's MC list in emac_configure()
 * so the MAC accepts incoming PAUSE frames.
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
176
1d3bb996
DG
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats.
 * These are the ethtool -S key strings; they are user-visible ABI, so the
 * historical misspelling "tx_bd_multple_collisions" is preserved on purpose.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
195
196static irqreturn_t emac_irq(int irq, void *dev_instance);
197static void emac_clean_tx_ring(struct emac_instance *dev);
198static void __emac_set_multicast_list(struct emac_instance *dev);
199
200static inline int emac_phy_supports_gige(int phy_mode)
201{
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_TBI ||
205 phy_mode == PHY_MODE_RTBI;
206}
207
208static inline int emac_phy_gpcs(int phy_mode)
209{
210 return phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
212}
213
214static inline void emac_tx_enable(struct emac_instance *dev)
215{
216 struct emac_regs __iomem *p = dev->emacp;
217 u32 r;
218
219 DBG(dev, "tx_enable" NL);
220
221 r = in_be32(&p->mr0);
222 if (!(r & EMAC_MR0_TXE))
223 out_be32(&p->mr0, r | EMAC_MR0_TXE);
224}
225
226static void emac_tx_disable(struct emac_instance *dev)
227{
228 struct emac_regs __iomem *p = dev->emacp;
229 u32 r;
230
231 DBG(dev, "tx_disable" NL);
232
233 r = in_be32(&p->mr0);
234 if (r & EMAC_MR0_TXE) {
235 int n = dev->stop_timeout;
236 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
237 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
238 udelay(1);
239 --n;
240 }
241 if (unlikely(!n))
242 emac_report_timeout_error(dev, "TX disable timeout");
243 }
244}
245
246static void emac_rx_enable(struct emac_instance *dev)
247{
248 struct emac_regs __iomem *p = dev->emacp;
249 u32 r;
250
251 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
252 goto out;
253
254 DBG(dev, "rx_enable" NL);
255
256 r = in_be32(&p->mr0);
257 if (!(r & EMAC_MR0_RXE)) {
258 if (unlikely(!(r & EMAC_MR0_RXI))) {
259 /* Wait if previous async disable is still in progress */
260 int n = dev->stop_timeout;
261 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
262 udelay(1);
263 --n;
264 }
265 if (unlikely(!n))
266 emac_report_timeout_error(dev,
267 "RX disable timeout");
268 }
269 out_be32(&p->mr0, r | EMAC_MR0_RXE);
270 }
271 out:
272 ;
273}
274
275static void emac_rx_disable(struct emac_instance *dev)
276{
277 struct emac_regs __iomem *p = dev->emacp;
278 u32 r;
279
280 DBG(dev, "rx_disable" NL);
281
282 r = in_be32(&p->mr0);
283 if (r & EMAC_MR0_RXE) {
284 int n = dev->stop_timeout;
285 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
286 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
287 udelay(1);
288 --n;
289 }
290 if (unlikely(!n))
291 emac_report_timeout_error(dev, "RX disable timeout");
292 }
293}
294
/* Quiesce the interface: block multicast updates (no_mcast), stop MAL
 * polling and disable the TX queue.  Lock order is tx_lock -> addr_lock.
 * trans_start is refreshed so the stack does not declare a TX timeout
 * while the device is deliberately stopped.
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
306
/* Undo emac_netif_stop(): re-allow multicast updates (flushing one that
 * was deferred while stopped), wake the TX queue and re-enable MAL polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	/* apply a multicast change that arrived while we were stopped */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 * not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
326
327static inline void emac_rx_disable_async(struct emac_instance *dev)
328{
329 struct emac_regs __iomem *p = dev->emacp;
330 u32 r;
331
332 DBG(dev, "rx_disable_async" NL);
333
334 r = in_be32(&p->mr0);
335 if (r & EMAC_MR0_RXE)
336 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
337}
338
/* Soft-reset the EMAC core.
 * Returns 0 on success, -ETIMEDOUT if SRST never self-clears.
 * reset_failed is latched so subsequent resets on dead hardware skip
 * the RX/TX disable step (which would itself just time out).
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	/* SRST is self-clearing; poll (no delay in original) up to 20 reads */
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
367
/* Program the group-address hash table (GAHT) from the device's current
 * multicast list.  The table is built in a local copy first, then written
 * out register by register.
 * NOTE(review): gaht_temp is a VLA sized by EMAC_XAHT_REGS(dev) — small in
 * practice, but on-stack; confirm bounds if new EMAC variants grow it.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct dev_mc_list *dmi;
	int i;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	memset(gaht_temp, 0, sizeof (gaht_temp));

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int slot, reg, mask;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* CRC of the MAC address selects a slot; map the slot to a
		 * (register, bit) pair in the hash table. */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
396
397static inline u32 emac_iff2rmr(struct net_device *ndev)
398{
399 struct emac_instance *dev = netdev_priv(ndev);
400 u32 r;
401
402 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
403
404 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
405 r |= EMAC4_RMR_BASE;
406 else
407 r |= EMAC_RMR_BASE;
408
409 if (ndev->flags & IFF_PROMISC)
410 r |= EMAC_RMR_PME;
05781ccd
GE
411 else if (ndev->flags & IFF_ALLMULTI ||
412 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
1d3bb996
DG
413 r |= EMAC_RMR_PMME;
414 else if (ndev->mc_count > 0)
415 r |= EMAC_RMR_MAE;
416
417 return r;
418}
419
420static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
421{
422 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
423
424 DBG2(dev, "__emac_calc_base_mr1" NL);
425
426 switch(tx_size) {
427 case 2048:
428 ret |= EMAC_MR1_TFS_2K;
429 break;
430 default:
431 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
432 dev->ndev->name, tx_size);
433 }
434
435 switch(rx_size) {
436 case 16384:
437 ret |= EMAC_MR1_RFS_16K;
438 break;
439 case 4096:
440 ret |= EMAC_MR1_RFS_4K;
441 break;
442 default:
443 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
444 dev->ndev->name, rx_size);
445 }
446
447 return ret;
448}
449
450static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
451{
452 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
4696c3c4 453 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
1d3bb996
DG
454
455 DBG2(dev, "__emac4_calc_base_mr1" NL);
456
457 switch(tx_size) {
458 case 4096:
459 ret |= EMAC4_MR1_TFS_4K;
460 break;
461 case 2048:
462 ret |= EMAC4_MR1_TFS_2K;
463 break;
464 default:
465 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
466 dev->ndev->name, tx_size);
467 }
468
469 switch(rx_size) {
470 case 16384:
471 ret |= EMAC4_MR1_RFS_16K;
472 break;
473 case 4096:
474 ret |= EMAC4_MR1_RFS_4K;
475 break;
476 case 2048:
477 ret |= EMAC4_MR1_RFS_2K;
478 break;
479 default:
480 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
481 dev->ndev->name, rx_size);
482 }
483
484 return ret;
485}
486
487static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
488{
489 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
490 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
491 __emac_calc_base_mr1(dev, tx_size, rx_size);
492}
493
494static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
495{
496 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
497 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
498 else
499 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
500}
501
502static inline u32 emac_calc_rwmr(struct emac_instance *dev,
503 unsigned int low, unsigned int high)
504{
505 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
506 return (low << 22) | ( (high & 0x3ff) << 6);
507 else
508 return (low << 23) | ( (high & 0x1ff) << 7);
509}
510
/* Full (re)configuration of the MAC for the current PHY state: reset (or
 * loopback if no link), MR1 mode/FIFO/flow-control setup, MAC address,
 * RX mode, FIFO thresholds, watermarks, PAUSE timer and IRQ mask.
 * Returns 0 or -ETIMEDOUT if the EMAC reset fails.  The register
 * programming order below is deliberate — do not reorder.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* No carrier: put the MAC in internal loopback instead of
		 * resetting it. */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						 EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
671
/* Stop the interface, reprogram the MAC for the current state, and restart.
 * TX/RX are re-enabled only if emac_configure() succeeded; the netif is
 * restarted either way.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
683
/* Heavy-handed TX recovery: stop TX, drain and reset the TX ring and its
 * bookkeeping indices, reconfigure the MAC, then bring TX (and RX) back up.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
699
/* Workqueue handler scheduled by emac_tx_timeout().  Runs the full TX
 * reset in process context under link_lock; does nothing unless the
 * interface has actually been opened.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
714
/* net_device tx_timeout hook.  Only schedules reset_work here — the reset
 * itself sleeps on link_lock, which must not be taken in this context.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
723
724
725static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
726{
727 int done = !!(stacr & EMAC_STACR_OC);
728
729 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
730 done = !done;
731
732 return done;
733};
734
/* Low-level MDIO register read through this EMAC's STA controller.
 * Serialized by mdio_lock; claims the shared ZMII/RGMII MDIO lines for
 * the duration.  Returns the 16-bit register value on success, or a
 * negative errno (-ETIMEDOUT on bus timeouts, -EREMOTEIO on PHY error).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from the completed STACR value */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release MDIO lines and lock on every path */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
805
/* Low-level MDIO register write through this EMAC's STA controller.
 * Serialized by mdio_lock; claims the shared ZMII/RGMII MDIO lines for
 * the duration.  Timeouts are logged at debug level only — the write
 * returns void, so callers cannot observe failure.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release MDIO lines and lock on every path */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
866
867static int emac_mdio_read(struct net_device *ndev, int id, int reg)
868{
869 struct emac_instance *dev = netdev_priv(ndev);
870 int res;
871
872 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
873 (u8) id, (u8) reg);
874 return res;
875}
876
877static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
878{
879 struct emac_instance *dev = netdev_priv(ndev);
880
881 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
882 (u8) id, (u8) reg, (u16) val);
883}
884
/* Apply the current RX mode / multicast filter to the hardware.
 * Caller holds the TX lock (BH) — see emac_set_multicast_list().
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
917
/* net_device set_multicast_list hook (called with TX lock held, BH).
 * While the device is quiesced (no_mcast set by emac_netif_stop()) the
 * update is deferred via mcast_pending and replayed by emac_netif_start().
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
933
/* Rebuild the RX ring for a new MTU while the interface stays up.
 * Holds link_lock for the whole operation; the interface and RX channel
 * are stopped, all BDs are re-armed (dropping in-flight packets), and
 * skbs are reallocated only if the new MTU needs bigger buffers.
 * Returns 0 or -ENOMEM.  On -ENOMEM the ring is left partially upgraded
 * but every BD is valid, and RX is restarted regardless (see 'oom' label).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Drop a partially-received scatter/gather packet, if any */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +/-2 keeps the IP header word-aligned after the 14-byte
		 * Ethernet header (see emac_alloc_rx_skb()) */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1007
1008/* Process ctx, rtnl_lock semaphore */
1009static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1010{
1011 struct emac_instance *dev = netdev_priv(ndev);
1012 int ret = 0;
1013
1014 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1015 return -EINVAL;
1016
1017 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1018
1019 if (netif_running(ndev)) {
1020 /* Check if we really need to reinitalize RX ring */
1021 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1022 ret = emac_resize_rx_ring(dev, new_mtu);
1023 }
1024
1025 if (!ret) {
1026 ndev->mtu = new_mtu;
1027 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1028 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1029 }
1030
1031 return ret;
1032}
1033
1034static void emac_clean_tx_ring(struct emac_instance *dev)
1035{
1036 int i;
1037
1038 for (i = 0; i < NUM_TX_BUFF; ++i) {
1039 if (dev->tx_skb[i]) {
1040 dev_kfree_skb(dev->tx_skb[i]);
1041 dev->tx_skb[i] = NULL;
1042 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1043 ++dev->estats.tx_dropped;
1044 }
1045 dev->tx_desc[i].ctrl = 0;
1046 dev->tx_desc[i].data_ptr = 0;
1047 }
1048}
1049
1050static void emac_clean_rx_ring(struct emac_instance *dev)
1051{
1052 int i;
1053
1054 for (i = 0; i < NUM_RX_BUFF; ++i)
1055 if (dev->rx_skb[i]) {
1056 dev->rx_desc[i].ctrl = 0;
1057 dev_kfree_skb(dev->rx_skb[i]);
1058 dev->rx_skb[i] = NULL;
1059 dev->rx_desc[i].data_ptr = 0;
1060 }
1061
1062 if (dev->rx_sg_skb) {
1063 dev_kfree_skb(dev->rx_sg_skb);
1064 dev->rx_sg_skb = NULL;
1065 }
1066}
1067
/* Allocate and DMA-map a fresh RX skb for ring slot @slot.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* The extra 2 bytes keep the IP header word-aligned behind the
	 * 14-byte Ethernet header; the mapping starts 2 bytes early and
	 * the data pointer is bumped back by 2 to compensate. */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Descriptor fields must be visible before EMPTY hands the
	 * buffer to the hardware. */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1088
1089static void emac_print_link_status(struct emac_instance *dev)
1090{
1091 if (netif_carrier_ok(dev->ndev))
1092 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1093 dev->ndev->name, dev->phy.speed,
1094 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1095 dev->phy.pause ? ", pause enabled" :
1096 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1097 else
1098 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1099}
1100
/* Process ctx, rtnl_lock semaphore.
 * Bring the interface up: request the error IRQ, populate the RX
 * ring, start PHY link polling (when a PHY is present), then program
 * and enable the EMAC/MAL channels.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping before enabling anything */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		/* link_polling must be visible before the work runs */
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY: assume the link is always up */
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1171
/* BHs disabled */
#if 0
/* Dead code (compiled out): decode MR1 into speed/duplex/pause and
 * report whether the hardware state differs from the cached PHY
 * state.  Kept for reference only.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1204
/* Delayed-work handler that polls the PHY for link changes and
 * reschedules itself.  All link state is updated under link_lock;
 * dev->opened is re-checked under the lock so a concurrent close
 * stops the polling cleanly.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Link came up: resync MAC with the PHY */
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			/* Link went down: quiesce TX and reinit */
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1245
/* Force the link poller to re-evaluate the link soon: drop carrier,
 * then cancel and immediately reschedule the link work if polling is
 * active.  link_polling is re-read after the cancel in case a
 * concurrent close cleared it.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1256
/* Process ctx, rtnl_lock semaphore.
 * Tear down the interface: stop link polling, mark closed under
 * link_lock, disable EMAC/MAL channels, free the rings and the IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		/* Stop the poller before taking link_lock */
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
1286
/* Return the TX descriptor control bit requesting TAH checksum
 * insertion when hardware offload is available and the stack asked
 * for it (CHECKSUM_PARTIAL); 0 otherwise.
 */
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
1297
/* Kick the transmitter after descriptors have been made READY and
 * update queue state and statistics.  Always returns 0 (queued).
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring full: stop the queue until poll_tx reaps some slots */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
1323
1324/* Tx lock BH */
1325static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1326{
1327 struct emac_instance *dev = netdev_priv(ndev);
1328 unsigned int len = skb->len;
1329 int slot;
1330
1331 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1332 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1333
1334 slot = dev->tx_slot++;
1335 if (dev->tx_slot == NUM_TX_BUFF) {
1336 dev->tx_slot = 0;
1337 ctrl |= MAL_TX_CTRL_WRAP;
1338 }
1339
1340 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1341
1342 dev->tx_skb[slot] = skb;
1343 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1344 skb->data, len,
1345 DMA_TO_DEVICE);
1346 dev->tx_desc[slot].data_len = (u16) len;
1347 wmb();
1348 dev->tx_desc[slot].ctrl = ctrl;
1349
1350 return emac_xmit_finish(dev, len);
1351}
1352
1d3bb996
DG
/* Spread a DMA-contiguous buffer (@pd, @len) over consecutive TX
 * descriptors in MAL_MAX_TX_SIZE chunks, starting at the slot after
 * @slot.  @base_ctrl is applied to every chunk; MAL_TX_CTRL_LAST is
 * added on the final chunk only when @last is set, and WRAP whenever
 * the end of the ring is reached.  Returns the slot index of the
 * last descriptor written.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1381
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter-gather transmit: spreads the linear part and each page
 * fragment over multiple descriptors.  The first descriptor's READY
 * bit is set last (after the wmb) so the hardware never sees a
 * half-built chain.  On slot exhaustion mid-build, all descriptors
 * written so far are rolled back and the queue is stopped.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
1d3bb996
DG
1465
1466/* Tx lock BHs */
1467static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1468{
1469 struct emac_error_stats *st = &dev->estats;
1470
1471 DBG(dev, "BD TX error %04x" NL, ctrl);
1472
1473 ++st->tx_bd_errors;
1474 if (ctrl & EMAC_TX_ST_BFCS)
1475 ++st->tx_bd_bad_fcs;
1476 if (ctrl & EMAC_TX_ST_LCS)
1477 ++st->tx_bd_carrier_loss;
1478 if (ctrl & EMAC_TX_ST_ED)
1479 ++st->tx_bd_excessive_deferral;
1480 if (ctrl & EMAC_TX_ST_EC)
1481 ++st->tx_bd_excessive_collisions;
1482 if (ctrl & EMAC_TX_ST_LC)
1483 ++st->tx_bd_late_collision;
1484 if (ctrl & EMAC_TX_ST_MC)
1485 ++st->tx_bd_multple_collisions;
1486 if (ctrl & EMAC_TX_ST_SC)
1487 ++st->tx_bd_single_collision;
1488 if (ctrl & EMAC_TX_ST_UR)
1489 ++st->tx_bd_underrun;
1490 if (ctrl & EMAC_TX_ST_SQE)
1491 ++st->tx_bd_sqe;
1492}
1493
/* Reap completed TX descriptors: free their skbs, account BD errors
 * and wake the queue once enough slots are free.  Runs under the
 * netif TX lock with BHs disabled.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			/* Descriptor completed (READY cleared by HW) */
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split frames attach the skb to the last BD only */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1539
/* Hand an RX descriptor's existing skb back to the hardware (used
 * when the packet was copied out or dropped).  When @len is non-zero
 * the buffer is re-mapped so the device regains a coherent view of
 * the region it wrote.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Clear data_len before EMPTY re-arms the descriptor */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1556
1557static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1558{
1559 struct emac_error_stats *st = &dev->estats;
1560
1561 DBG(dev, "BD RX error %04x" NL, ctrl);
1562
1563 ++st->rx_bd_errors;
1564 if (ctrl & EMAC_RX_ST_OE)
1565 ++st->rx_bd_overrun;
1566 if (ctrl & EMAC_RX_ST_BP)
1567 ++st->rx_bd_bad_packet;
1568 if (ctrl & EMAC_RX_ST_RP)
1569 ++st->rx_bd_runt_packet;
1570 if (ctrl & EMAC_RX_ST_SE)
1571 ++st->rx_bd_short_event;
1572 if (ctrl & EMAC_RX_ST_AE)
1573 ++st->rx_bd_alignment_error;
1574 if (ctrl & EMAC_RX_ST_BFCS)
1575 ++st->rx_bd_bad_fcs;
1576 if (ctrl & EMAC_RX_ST_PTL)
1577 ++st->rx_bd_packet_too_long;
1578 if (ctrl & EMAC_RX_ST_ORE)
1579 ++st->rx_bd_out_of_range;
1580 if (ctrl & EMAC_RX_ST_IRE)
1581 ++st->rx_bd_in_range;
1582}
1583
1584static inline void emac_rx_csum(struct emac_instance *dev,
1585 struct sk_buff *skb, u16 ctrl)
1586{
1587#ifdef CONFIG_IBM_NEW_EMAC_TAH
1588 if (!ctrl && dev->tah_dev) {
1589 skb->ip_summed = CHECKSUM_UNNECESSARY;
1590 ++dev->stats.rx_packets_csum;
1591 }
1592#endif
1593}
1594
/* Append slot's data to the in-progress scatter-gather skb.
 * Returns 0 when the chunk was appended; -1 when there is no SG skb
 * in flight or the assembled frame would exceed rx_skb_size (the
 * partial frame is dropped).  The slot is always recycled.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1616
/* NAPI poll context */
/* Main RX processing loop: consume up to @budget completed RX
 * descriptors, handling single-descriptor frames, small-packet
 * copy-break, scatter-gather assembly, BD errors and OOM fallback.
 * If RX was stopped (MAL_COMMAC_RX_STOPPED) and budget remains, the
 * channel is restarted before returning.  Returns the number of
 * descriptors processed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* ctrl read must complete before data_len is read */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			/* Bad descriptor: count, recycle and move on */
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			/* Runt frame, not worth passing up */
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			/* Copy-break: small packet, copy into a fresh
			 * skb and keep the ring buffer in place */
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Multi-descriptor (scatter-gather) frame */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			/* More completed BDs appeared: keep polling */
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		/* Restart the RX channel from slot 0 */
		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1748
1749/* NAPI poll context */
1750static int emac_peek_rx(void *param)
1751{
1752 struct emac_instance *dev = param;
1753
1754 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1755}
1756
1757/* NAPI poll context */
1758static int emac_peek_rx_sg(void *param)
1759{
1760 struct emac_instance *dev = param;
1761
1762 int slot = dev->rx_slot;
1763 while (1) {
1764 u16 ctrl = dev->rx_desc[slot].ctrl;
1765 if (ctrl & MAL_RX_CTRL_EMPTY)
1766 return 0;
1767 else if (ctrl & MAL_RX_CTRL_LAST)
1768 return 1;
1769
1770 slot = (slot + 1) % NUM_RX_BUFF;
1771
1772 /* I'm just being paranoid here :) */
1773 if (unlikely(slot == dev->rx_slot))
1774 return 0;
1775 }
1776}
1777
/* Hard IRQ */
/* RX descriptor error callback from the MAL: count the event and
 * stop the receiver asynchronously (restarted from emac_poll_rx).
 */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1786
1787/* Hard IRQ */
1788static irqreturn_t emac_irq(int irq, void *dev_instance)
1789{
1790 struct emac_instance *dev = dev_instance;
1791 struct emac_regs __iomem *p = dev->emacp;
1792 struct emac_error_stats *st = &dev->estats;
1793 u32 isr;
1794
1795 spin_lock(&dev->lock);
1796
1797 isr = in_be32(&p->isr);
1798 out_be32(&p->isr, isr);
1799
1800 DBG(dev, "isr = %08x" NL, isr);
1801
1802 if (isr & EMAC4_ISR_TXPE)
1803 ++st->tx_parity;
1804 if (isr & EMAC4_ISR_RXPE)
1805 ++st->rx_parity;
1806 if (isr & EMAC4_ISR_TXUE)
1807 ++st->tx_underrun;
1808 if (isr & EMAC4_ISR_RXOE)
1809 ++st->rx_fifo_overrun;
1810 if (isr & EMAC_ISR_OVR)
1811 ++st->rx_overrun;
1812 if (isr & EMAC_ISR_BP)
1813 ++st->rx_bad_packet;
1814 if (isr & EMAC_ISR_RP)
1815 ++st->rx_runt_packet;
1816 if (isr & EMAC_ISR_SE)
1817 ++st->rx_short_event;
1818 if (isr & EMAC_ISR_ALE)
1819 ++st->rx_alignment_error;
1820 if (isr & EMAC_ISR_BFCS)
1821 ++st->rx_bad_fcs;
1822 if (isr & EMAC_ISR_PTLE)
1823 ++st->rx_packet_too_long;
1824 if (isr & EMAC_ISR_ORE)
1825 ++st->rx_out_of_range;
1826 if (isr & EMAC_ISR_IRE)
1827 ++st->rx_in_range;
1828 if (isr & EMAC_ISR_SQE)
1829 ++st->tx_sqe;
1830 if (isr & EMAC_ISR_TE)
1831 ++st->tx_errors;
1832
1833 spin_unlock(&dev->lock);
1834
1835 return IRQ_HANDLED;
1836}
1837
/* Aggregate the driver's detailed 64-bit counters into the "legacy"
 * struct net_device_stats snapshot.  Taken under the IRQ spinlock so
 * the snapshot is internally consistent with emac_irq updates.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1890
/* MAL callback set using the plain single-descriptor RX peek */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

/* Same callbacks, but with the scatter-gather-aware RX peek */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1904
/* Ethtool support */
/* Report the PHY capabilities and current link settings; fields that
 * the link poller updates are read under link_lock.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* Negative PHY address means no external PHY */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
1926
/* Apply new link settings from ethtool: either a forced speed/duplex
 * (validated against the PHY's supported feature mask) or a new
 * autonegotiation advertisement.  Always kicks the link poller so
 * the change takes effect promptly.
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the requested speed/duplex combination
		 * must be in the PHY's supported feature set */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		/* Advertise only what the PHY supports, preserving the
		 * current pause advertisement bits */
		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
1997
1998static void emac_ethtool_get_ringparam(struct net_device *ndev,
1999 struct ethtool_ringparam *rp)
2000{
2001 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2002 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2003}
2004
/* Report the current flow-control configuration derived from the
 * cached PHY state, read under link_lock.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	/* Pause autoneg is on when the PHY can autonegotiate and is
	 * advertising some form of pause */
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
2023
/* RX checksum offload is available iff a TAH device is attached */
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}
2030
/* Size of this EMAC's register-dump section: subheader plus the raw
 * register image, whose size depends on the EMAC4 feature.
 */
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE(dev);
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE(dev);
}
2040
2041static int emac_ethtool_get_regs_len(struct net_device *ndev)
2042{
2043 struct emac_instance *dev = netdev_priv(ndev);
2044 int size;
2045
2046 size = sizeof(struct emac_ethtool_regs_hdr) +
2047 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2048 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2049 size += zmii_get_regs_len(dev->zmii_dev);
2050 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2051 size += rgmii_get_regs_len(dev->rgmii_dev);
2052 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2053 size += tah_get_regs_len(dev->tah_dev);
2054
2055 return size;
2056}
2057
/* Write one EMAC register-dump section (subheader followed by the
 * raw register image) into @buf.  Returns the position just past
 * the section, for chaining the next component's dump.
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
	}
}
2073
/* Assemble the full ethtool register dump: MAL section, EMAC
 * section, then any ZMII/RGMII/TAH sections, recording each present
 * optional component in the header's bitmap.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2098
2099static int emac_ethtool_nway_reset(struct net_device *ndev)
2100{
2101 struct emac_instance *dev = netdev_priv(ndev);
2102 int res = 0;
2103
2104 DBG(dev, "nway_reset" NL);
2105
2106 if (dev->phy.address < 0)
2107 return -EOPNOTSUPP;
2108
2109 mutex_lock(&dev->link_lock);
2110 if (!dev->phy.autoneg) {
2111 res = -EINVAL;
2112 goto out;
2113 }
2114
2115 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2116 out:
2117 mutex_unlock(&dev->link_lock);
2118 emac_force_link_update(dev);
2119 return res;
2120}
2121
/* Number of u64 counters exported through get_ethtool_stats */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2126
2127static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2128 u8 * buf)
2129{
2130 if (stringset == ETH_SS_STATS)
2131 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2132}
2133
/* Copy the regular and error counter structs back-to-back into the
 * user buffer.  NOTE(review): this assumes both structs are packed
 * arrays of u64 matching the order of emac_stats_keys — confirm
 * against their definitions in core.h.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2144
2145static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2146 struct ethtool_drvinfo *info)
2147{
2148 struct emac_instance *dev = netdev_priv(ndev);
2149
2150 strcpy(info->driver, "ibm_emac");
2151 strcpy(info->version, DRV_VERSION);
2152 info->fw_version[0] = '\0';
2153 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2154 dev->cell_index, dev->ofdev->node->full_name);
2155 info->n_stats = emac_ethtool_get_stats_count(ndev);
2156 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2157}
2158
/* ethtool operations exported by the EMAC driver.  TX checksum and
 * scatter/gather report the current netdev feature flags via the
 * generic ethtool_op_* helpers.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2182
/* Legacy MII ioctl handler.
 *
 * Supports both the standard SIOCxMIIxxx ioctls and the historical
 * SIOCDEVPRIVATE equivalents.  data[] follows the mii_ioctl layout:
 * [0] PHY address, [1] register number, [2] value in, [3] value out.
 * Returns -EOPNOTSUPP in PHY-less configurations.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		/* Note: reads are always done against our own PHY address,
		 * not the address in data[0].
		 */
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2213
/* One probe-time dependency of an EMAC instance (MAL, ZMII, RGMII,
 * TAH, MDIO companion, or the previous EMAC in the boot list).  The
 * fields are resolved incrementally by emac_check_deps().
 */
struct emac_depentry {
	u32 phandle;			/* DT phandle, 0 = no dependency */
	struct device_node *node;	/* resolved device-tree node */
	struct of_device *ofdev;	/* resolved OF device */
	void *drvdata;			/* non-NULL once the driver bound */
};

/* Indices into the emac_depentry array used by emac_check_deps() */
#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
2228
/* Try to resolve every dependency one step further (phandle -> node ->
 * of_device -> bound drvdata).  Called repeatedly from the wait loop in
 * emac_wait_deps(); returns non-zero once all dependencies are satisfied.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			/* previous EMAC in the boot list; its slot is NULLed
			 * if that probe failed, dropping the dependency
			 */
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
2267
2268static void emac_put_deps(struct emac_instance *dev)
2269{
2270 if (dev->mal_dev)
2271 of_dev_put(dev->mal_dev);
2272 if (dev->zmii_dev)
2273 of_dev_put(dev->zmii_dev);
2274 if (dev->rgmii_dev)
2275 of_dev_put(dev->rgmii_dev);
2276 if (dev->mdio_dev)
2277 of_dev_put(dev->mdio_dev);
2278 if (dev->tah_dev)
2279 of_dev_put(dev->tah_dev);
2280}
2281
/* Bus notifier callback: wake every EMAC probe that is waiting on a
 * dependent device whenever a driver binds on the OF platform bus.
 */
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2290
51d4a1cc 2291static struct notifier_block emac_of_bus_notifier __devinitdata = {
1d3bb996
DG
2292 .notifier_call = emac_of_bus_notify
2293};
2294
/* Wait (with timeout) until all devices this EMAC depends on have been
 * probed and bound, taking references on their of_devices.
 *
 * Returns 0 on success with dev->{mal,zmii,rgmii,tah,mdio}_dev filled in,
 * or -ENODEV if the dependencies did not appear in time (all partially
 * taken references are dropped in that case).
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* sentinel phandle: the "previous EMAC" dep is keyed off the boot
	 * list rather than a real device-tree phandle
	 */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	/* Re-check after unregistering to close the wakeup race */
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The "previous EMAC" reference is only needed for ordering */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2334
2335static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2336 u32 *val, int fatal)
2337{
2338 int len;
2339 const u32 *prop = of_get_property(np, name, &len);
2340 if (prop == NULL || len < sizeof(u32)) {
2341 if (fatal)
2342 printk(KERN_ERR "%s: missing %s property\n",
2343 np->full_name, name);
2344 return -ENODEV;
2345 }
2346 *val = *prop;
2347 return 0;
2348}
2349
/* Locate, probe and initialize the PHY attached to this EMAC.
 *
 * Handles three cases: PHY-less operation (both phy-address and phy-map
 * absent/0xffffffff), internal GPCS PHYs, and a scan of the MII bus for
 * an external PHY (guided by phy-map / phy-address).  On success the
 * PHY is set up either for autonegotiation or forced to its highest
 * supported speed/duplex.  Returns 0 on success, -ENXIO if no PHY is
 * found.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* Fixed 100/FD link parameters, no MDIO */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	/* busy_phy_map is global; serialize PHY scanning across EMACs */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX
	 * NOTE(review): this repeats the dcri_clrset() above; it looks
	 * redundant but is kept as-is pending hardware confirmation.
	 */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* If an explicit address is given, restrict the scan to it */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2478
2479static int __devinit emac_init_config(struct emac_instance *dev)
2480{
2481 struct device_node *np = dev->ofdev->node;
2482 const void *p;
2483 unsigned int plen;
2484 const char *pm, *phy_modes[] = {
2485 [PHY_MODE_NA] = "",
2486 [PHY_MODE_MII] = "mii",
2487 [PHY_MODE_RMII] = "rmii",
2488 [PHY_MODE_SMII] = "smii",
2489 [PHY_MODE_RGMII] = "rgmii",
2490 [PHY_MODE_TBI] = "tbi",
2491 [PHY_MODE_GMII] = "gmii",
2492 [PHY_MODE_RTBI] = "rtbi",
2493 [PHY_MODE_SGMII] = "sgmii",
2494 };
2495
2496 /* Read config from device-tree */
2497 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2498 return -ENXIO;
2499 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2500 return -ENXIO;
2501 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2502 return -ENXIO;
2503 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2504 return -ENXIO;
2505 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2506 dev->max_mtu = 1500;
2507 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2508 dev->rx_fifo_size = 2048;
2509 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2510 dev->tx_fifo_size = 2048;
2511 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2512 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2513 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2514 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2515 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2516 dev->phy_address = 0xffffffff;
2517 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2518 dev->phy_map = 0xffffffff;
2519 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2520 return -ENXIO;
2521 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2522 dev->tah_ph = 0;
2523 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
63b6cad7 2524 dev->tah_port = 0;
1d3bb996
DG
2525 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2526 dev->mdio_ph = 0;
2527 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2528 dev->zmii_ph = 0;;
2529 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2530 dev->zmii_port = 0xffffffff;;
2531 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2532 dev->rgmii_ph = 0;;
2533 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2534 dev->rgmii_port = 0xffffffff;;
2535 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2536 dev->fifo_entry_size = 16;
2537 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2538 dev->mal_burst_size = 256;
2539
2540 /* PHY mode needs some decoding */
2541 dev->phy_mode = PHY_MODE_NA;
2542 pm = of_get_property(np, "phy-mode", &plen);
2543 if (pm != NULL) {
2544 int i;
2545 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2546 if (!strcasecmp(pm, phy_modes[i])) {
2547 dev->phy_mode = i;
2548 break;
2549 }
2550 }
2551
2552 /* Backward compat with non-final DT */
2553 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2554 u32 nmode = *(const u32 *)pm;
2555 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2556 dev->phy_mode = nmode;
2557 }
2558
2559 /* Check EMAC version */
05781ccd
GE
2560 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2561 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2562 } else if (of_device_is_compatible(np, "ibm,emac4")) {
1d3bb996 2563 dev->features |= EMAC_FTR_EMAC4;
0925ab5d
VB
2564 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2565 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
11121e30
VB
2566 } else {
2567 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2568 of_device_is_compatible(np, "ibm,emac-440gr"))
2569 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
0925ab5d 2570 }
bff713b5
BH
2571
2572 /* Fixup some feature bits based on the device tree */
2573 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
1d3bb996 2574 dev->features |= EMAC_FTR_STACR_OC_INVERT;
bff713b5
BH
2575 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2576 dev->features |= EMAC_FTR_HAS_NEW_STACR;
1d3bb996 2577
bff713b5
BH
2578 /* CAB lacks the appropriate properties */
2579 if (of_device_is_compatible(np, "ibm,emac-axon"))
2580 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2581 EMAC_FTR_STACR_OC_INVERT;
2582
2583 /* Enable TAH/ZMII/RGMII features as found */
1d3bb996
DG
2584 if (dev->tah_ph != 0) {
2585#ifdef CONFIG_IBM_NEW_EMAC_TAH
2586 dev->features |= EMAC_FTR_HAS_TAH;
2587#else
2588 printk(KERN_ERR "%s: TAH support not enabled !\n",
2589 np->full_name);
2590 return -ENXIO;
2591#endif
2592 }
2593
2594 if (dev->zmii_ph != 0) {
2595#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2596 dev->features |= EMAC_FTR_HAS_ZMII;
2597#else
2598 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2599 np->full_name);
2600 return -ENXIO;
2601#endif
2602 }
2603
2604 if (dev->rgmii_ph != 0) {
2605#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2606 dev->features |= EMAC_FTR_HAS_RGMII;
2607#else
2608 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2609 np->full_name);
2610 return -ENXIO;
2611#endif
2612 }
2613
2614 /* Read MAC-address */
2615 p = of_get_property(np, "local-mac-address", NULL);
2616 if (p == NULL) {
2617 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2618 np->full_name);
2619 return -ENXIO;
2620 }
2621 memcpy(dev->ndev->dev_addr, p, 6);
2622
05781ccd
GE
2623 /* IAHT and GAHT filter parameterization */
2624 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2625 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2626 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2627 } else {
2628 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2629 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2630 }
2631
1d3bb996
DG
2632 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2633 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2634 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2635 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2636 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2637
2638 return 0;
2639}
2640
2641static int __devinit emac_probe(struct of_device *ofdev,
2642 const struct of_device_id *match)
2643{
2644 struct net_device *ndev;
2645 struct emac_instance *dev;
2646 struct device_node *np = ofdev->node;
2647 struct device_node **blist = NULL;
2648 int err, i;
2649
be63c09a
JB
2650 /* Skip unused/unwired EMACS. We leave the check for an unused
2651 * property here for now, but new flat device trees should set a
2652 * status property to "disabled" instead.
2653 */
2654 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3d722562
HB
2655 return -ENODEV;
2656
1d3bb996
DG
2657 /* Find ourselves in the bootlist if we are there */
2658 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2659 if (emac_boot_list[i] == np)
2660 blist = &emac_boot_list[i];
2661
2662 /* Allocate our net_device structure */
2663 err = -ENOMEM;
2664 ndev = alloc_etherdev(sizeof(struct emac_instance));
2665 if (!ndev) {
2666 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2667 np->full_name);
2668 goto err_gone;
2669 }
2670 dev = netdev_priv(ndev);
2671 dev->ndev = ndev;
2672 dev->ofdev = ofdev;
2673 dev->blist = blist;
1d3bb996
DG
2674 SET_NETDEV_DEV(ndev, &ofdev->dev);
2675
2676 /* Initialize some embedded data structures */
2677 mutex_init(&dev->mdio_lock);
2678 mutex_init(&dev->link_lock);
2679 spin_lock_init(&dev->lock);
2680 INIT_WORK(&dev->reset_work, emac_reset_work);
2681
2682 /* Init various config data based on device-tree */
2683 err = emac_init_config(dev);
2684 if (err != 0)
2685 goto err_free;
2686
2687 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2688 dev->emac_irq = irq_of_parse_and_map(np, 0);
2689 dev->wol_irq = irq_of_parse_and_map(np, 1);
2690 if (dev->emac_irq == NO_IRQ) {
2691 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2692 goto err_free;
2693 }
2694 ndev->irq = dev->emac_irq;
2695
2696 /* Map EMAC regs */
2697 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2698 printk(KERN_ERR "%s: Can't get registers address\n",
2699 np->full_name);
2700 goto err_irq_unmap;
2701 }
2702 // TODO : request_mem_region
05781ccd
GE
2703 dev->emacp = ioremap(dev->rsrc_regs.start,
2704 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
1d3bb996
DG
2705 if (dev->emacp == NULL) {
2706 printk(KERN_ERR "%s: Can't map device registers!\n",
2707 np->full_name);
2708 err = -ENOMEM;
2709 goto err_irq_unmap;
2710 }
2711
2712 /* Wait for dependent devices */
2713 err = emac_wait_deps(dev);
2714 if (err) {
2715 printk(KERN_ERR
2716 "%s: Timeout waiting for dependent devices\n",
2717 np->full_name);
2718 /* display more info about what's missing ? */
2719 goto err_reg_unmap;
2720 }
2721 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2722 if (dev->mdio_dev != NULL)
2723 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2724
2725 /* Register with MAL */
2726 dev->commac.ops = &emac_commac_ops;
2727 dev->commac.dev = dev;
2728 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2729 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2730 err = mal_register_commac(dev->mal, &dev->commac);
2731 if (err) {
2732 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2733 np->full_name, dev->mal_dev->node->full_name);
2734 goto err_rel_deps;
2735 }
2736 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2737 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2738
2739 /* Get pointers to BD rings */
2740 dev->tx_desc =
2741 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2742 dev->rx_desc =
2743 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2744
2745 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2746 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2747
2748 /* Clean rings */
2749 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2750 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
ab9b30cc
SN
2751 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2752 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
1d3bb996
DG
2753
2754 /* Attach to ZMII, if needed */
2755 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2756 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2757 goto err_unreg_commac;
2758
2759 /* Attach to RGMII, if needed */
2760 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2761 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2762 goto err_detach_zmii;
2763
2764 /* Attach to TAH, if needed */
2765 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2766 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2767 goto err_detach_rgmii;
2768
2769 /* Set some link defaults before we can find out real parameters */
2770 dev->phy.speed = SPEED_100;
2771 dev->phy.duplex = DUPLEX_FULL;
2772 dev->phy.autoneg = AUTONEG_DISABLE;
2773 dev->phy.pause = dev->phy.asym_pause = 0;
2774 dev->stop_timeout = STOP_TIMEOUT_100;
2775 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2776
2777 /* Find PHY if any */
2778 err = emac_init_phy(dev);
2779 if (err != 0)
2780 goto err_detach_tah;
2781
2782 /* Fill in the driver function table */
2783 ndev->open = &emac_open;
ee63d22b 2784 if (dev->tah_dev)
1d3bb996 2785 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1d3bb996
DG
2786 ndev->tx_timeout = &emac_tx_timeout;
2787 ndev->watchdog_timeo = 5 * HZ;
2788 ndev->stop = &emac_close;
2789 ndev->get_stats = &emac_stats;
2790 ndev->set_multicast_list = &emac_set_multicast_list;
2791 ndev->do_ioctl = &emac_ioctl;
2792 if (emac_phy_supports_gige(dev->phy_mode)) {
ee63d22b 2793 ndev->hard_start_xmit = &emac_start_xmit_sg;
1d3bb996
DG
2794 ndev->change_mtu = &emac_change_mtu;
2795 dev->commac.ops = &emac_commac_sg_ops;
ee63d22b
SR
2796 } else {
2797 ndev->hard_start_xmit = &emac_start_xmit;
1d3bb996
DG
2798 }
2799 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2800
2801 netif_carrier_off(ndev);
2802 netif_stop_queue(ndev);
2803
2804 err = register_netdev(ndev);
2805 if (err) {
2806 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2807 np->full_name, err);
2808 goto err_detach_tah;
2809 }
2810
2811 /* Set our drvdata last as we don't want them visible until we are
2812 * fully initialized
2813 */
2814 wmb();
2815 dev_set_drvdata(&ofdev->dev, dev);
2816
2817 /* There's a new kid in town ! Let's tell everybody */
2818 wake_up_all(&emac_probe_wait);
2819
2820
2821 printk(KERN_INFO
2822 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2823 ndev->name, dev->cell_index, np->full_name,
2824 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2825 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2826
2827 if (dev->phy.address >= 0)
2828 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2829 dev->phy.def->name, dev->phy.address);
2830
2831 emac_dbg_register(dev);
2832
2833 /* Life is good */
2834 return 0;
2835
2836 /* I have a bad feeling about this ... */
2837
2838 err_detach_tah:
2839 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2840 tah_detach(dev->tah_dev, dev->tah_port);
2841 err_detach_rgmii:
2842 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2843 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2844 err_detach_zmii:
2845 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2846 zmii_detach(dev->zmii_dev, dev->zmii_port);
2847 err_unreg_commac:
2848 mal_unregister_commac(dev->mal, &dev->commac);
2849 err_rel_deps:
2850 emac_put_deps(dev);
2851 err_reg_unmap:
2852 iounmap(dev->emacp);
2853 err_irq_unmap:
2854 if (dev->wol_irq != NO_IRQ)
2855 irq_dispose_mapping(dev->wol_irq);
2856 if (dev->emac_irq != NO_IRQ)
2857 irq_dispose_mapping(dev->emac_irq);
2858 err_free:
2859 kfree(ndev);
2860 err_gone:
2861 /* if we were on the bootlist, remove us as we won't show up and
2862 * wake up all waiters to notify them in case they were waiting
2863 * on us
2864 */
2865 if (blist) {
2866 *blist = NULL;
2867 wake_up_all(&emac_probe_wait);
2868 }
2869 return err;
2870}
2871
2872static int __devexit emac_remove(struct of_device *ofdev)
2873{
2874 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2875
2876 DBG(dev, "remove" NL);
2877
2878 dev_set_drvdata(&ofdev->dev, NULL);
2879
2880 unregister_netdev(dev->ndev);
2881
61dbcece
BH
2882 flush_scheduled_work();
2883
1d3bb996
DG
2884 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2885 tah_detach(dev->tah_dev, dev->tah_port);
2886 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2887 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2888 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2889 zmii_detach(dev->zmii_dev, dev->zmii_port);
2890
2891 mal_unregister_commac(dev->mal, &dev->commac);
2892 emac_put_deps(dev);
2893
2894 emac_dbg_unregister(dev);
2895 iounmap(dev->emacp);
2896
2897 if (dev->wol_irq != NO_IRQ)
2898 irq_dispose_mapping(dev->wol_irq);
2899 if (dev->emac_irq != NO_IRQ)
2900 irq_dispose_mapping(dev->emac_irq);
2901
2902 kfree(dev->ndev);
2903
2904 return 0;
2905}
2906
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: the three EMAC hardware generations */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
2924
/* OF platform driver glue for the EMAC cells */
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
2932
/* Build emac_boot_list: collect every EMAC node in the device tree
 * (taking a reference on each) and order them by cell-index, so that
 * probes can be serialized in hardware order via the PREV dependency.
 */
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
2972
/* Module init: bring up debug support, build the ordered EMAC boot
 * list, then initialize the submodules (MAL, ZMII, RGMII, TAH) before
 * registering the platform driver.  Uses the conventional goto chain
 * to unwind submodules in reverse order on failure.
 */
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

	/* Unwind in reverse initialization order */
 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}
3015
/* Module exit: unregister the driver, shut down submodules in reverse
 * init order, then drop the node references taken by emac_make_bootlist().
 */
static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}
3033
3034module_init(emac_init);
3035module_exit(emac_exit);
This page took 0.285169 seconds and 5 git commands to generate.