ibm_newemac: Parameterize EMAC Multicast Match Handling
1 /*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
40 #include <linux/of.h>
41
42 #include <asm/processor.h>
43 #include <asm/io.h>
44 #include <asm/dma.h>
45 #include <asm/uaccess.h>
46 #include <asm/dcr.h>
47 #include <asm/dcr-regs.h>
48
49 #include "core.h"
50
51 /*
52 * Lack of dma_unmap_???? calls is intentional.
53 *
54 * API-correct usage requires additional support state information to be
55 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56 * EMAC design (e.g. TX buffer passed from network stack can be split into
57 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58 * maintaining such information will add additional overhead.
59 * Current DMA API implementation for 4xx processors only ensures cache coherency
60 * and dma_unmap_???? routines are empty and are likely to stay this way.
61 * I decided to omit dma_unmap_???? calls because I don't want to add additional
62 * complexity just for the sake of following some abstract API, when it doesn't
63 * add any real benefit to the driver. I understand that this decision may be
64 * controversial, but I really tried to make code API-correct and efficient
65 * at the same time and didn't come up with code I liked :(. --ebs
66 */
67
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
71
72 MODULE_DESCRIPTION(DRV_DESC);
73 MODULE_AUTHOR
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
76
77 /*
78 * PPC64 doesn't (yet) have a cacheable_memcpy
79 */
80 #ifdef CONFIG_PPC64
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
82 #endif
83
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86
87 /* If packet size is less than this number, we allocate a small skb and copy
88 * the packet contents into it instead of just sending the original big skb up
89 */
90 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
91
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
95 *
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs), though in that case
98 * we can probably require explicit PHY IDs in the device-tree
99 */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
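
/* The consumers of busy_phy_map live in the PHY probe path further down
 * this file (outside this excerpt). A minimal sketch of the idea follows;
 * the function and parameter names are invented for illustration only.
 */
static int example_claim_phy_id(u32 dont_probe_map)
{
	int i;

	mutex_lock(&emac_phy_map_lock);
	for (i = 0; i < 32; ++i) {
		u32 m = 1u << i;

		if ((dont_probe_map & m) || (busy_phy_map & m))
			continue;
		busy_phy_map |= m;	/* reserve this PHY address */
		mutex_unlock(&emac_phy_map_lock);
		return i;
	}
	mutex_unlock(&emac_phy_map_lock);
	return -1;			/* no free PHY address left */
}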
102
103 /* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105 */
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107
108 /* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
115 * cell_index.
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118 */
119
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122
123 /* How long should I wait for dependent devices? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
125
126 /* I don't want to litter the system log with timeout errors
127 * when we have a brain-damaged PHY.
128 */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
130 const char *error)
131 {
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
137 }
138
139 /* EMAC PHY clock workaround:
140 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
142 */
143 static inline void emac_rx_clk_tx(struct emac_instance *dev)
144 {
145 #ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
149 #endif
150 }
151
152 static inline void emac_rx_clk_default(struct emac_instance *dev)
153 {
154 #ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
158 #endif
159 }
160
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON HZ
163 #define PHY_POLL_LINK_OFF (HZ / 5)
164
165 /* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167 */
168 #define STOP_TIMEOUT_10 1230
169 #define STOP_TIMEOUT_100 124
170 #define STOP_TIMEOUT_1000 13
171 #define STOP_TIMEOUT_1000_JUMBO 73
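
/* These values follow from the wire time of one maximum-length frame
 * (a sanity check of the arithmetic; frame geometry assumed):
 * (1518 + 8 preamble + 12 inter-frame gap) * 8 = 12304 bits
 *   10 Mb/s  -> ~1230 us  (STOP_TIMEOUT_10)
 *   100 Mb/s -> ~123 us   (STOP_TIMEOUT_100, rounded up to 124)
 *   1 Gb/s   -> ~12.3 us  (STOP_TIMEOUT_1000, rounded up to 13)
 * A ~9018-byte jumbo frame is ~72304 bits, i.e. ~72.3 us at 1 Gb/s
 * (STOP_TIMEOUT_1000_JUMBO, rounded up to 73).
 */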
172
173 static unsigned char default_mcast_addr[] = {
174 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01	/* 802.3x MAC Control (PAUSE) address */
175 };
176
177 /* Please keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
193 "tx_errors"
194 };
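
/* Why the ordering matters: the ethtool handlers later in this file export
 * the key table and the two stats structures as parallel flat arrays,
 * roughly as sketched below (illustrative function name, not the verbatim
 * driver code). Reordering either side silently mislabels counters.
 */
static void example_get_ethtool_stats(struct emac_instance *dev, u64 *out)
{
	/* emac_stats_keys[] labels these counters in declaration order */
	memcpy(out, &dev->stats, sizeof(dev->stats));
	out += sizeof(dev->stats) / sizeof(u64);
	memcpy(out, &dev->estats, sizeof(dev->estats));
}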
195
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_TBI ||
205 phy_mode == PHY_MODE_RTBI;
206 }
207
208 static inline int emac_phy_gpcs(int phy_mode)
209 {
210 return phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
212 }
213
214 static inline void emac_tx_enable(struct emac_instance *dev)
215 {
216 struct emac_regs __iomem *p = dev->emacp;
217 u32 r;
218
219 DBG(dev, "tx_enable" NL);
220
221 r = in_be32(&p->mr0);
222 if (!(r & EMAC_MR0_TXE))
223 out_be32(&p->mr0, r | EMAC_MR0_TXE);
224 }
225
226 static void emac_tx_disable(struct emac_instance *dev)
227 {
228 struct emac_regs __iomem *p = dev->emacp;
229 u32 r;
230
231 DBG(dev, "tx_disable" NL);
232
233 r = in_be32(&p->mr0);
234 if (r & EMAC_MR0_TXE) {
235 int n = dev->stop_timeout;
236 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
237 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
238 udelay(1);
239 --n;
240 }
241 if (unlikely(!n))
242 emac_report_timeout_error(dev, "TX disable timeout");
243 }
244 }
245
246 static void emac_rx_enable(struct emac_instance *dev)
247 {
248 struct emac_regs __iomem *p = dev->emacp;
249 u32 r;
250
251 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
252 goto out;
253
254 DBG(dev, "rx_enable" NL);
255
256 r = in_be32(&p->mr0);
257 if (!(r & EMAC_MR0_RXE)) {
258 if (unlikely(!(r & EMAC_MR0_RXI))) {
259 /* Wait if previous async disable is still in progress */
260 int n = dev->stop_timeout;
261 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
262 udelay(1);
263 --n;
264 }
265 if (unlikely(!n))
266 emac_report_timeout_error(dev,
267 "RX disable timeout");
268 }
269 out_be32(&p->mr0, r | EMAC_MR0_RXE);
270 }
271 out:
272 ;
273 }
274
275 static void emac_rx_disable(struct emac_instance *dev)
276 {
277 struct emac_regs __iomem *p = dev->emacp;
278 u32 r;
279
280 DBG(dev, "rx_disable" NL);
281
282 r = in_be32(&p->mr0);
283 if (r & EMAC_MR0_RXE) {
284 int n = dev->stop_timeout;
285 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
286 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
287 udelay(1);
288 --n;
289 }
290 if (unlikely(!n))
291 emac_report_timeout_error(dev, "RX disable timeout");
292 }
293 }
294
295 static inline void emac_netif_stop(struct emac_instance *dev)
296 {
297 netif_tx_lock_bh(dev->ndev);
298 dev->no_mcast = 1;
299 netif_tx_unlock_bh(dev->ndev);
300 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
301 mal_poll_disable(dev->mal, &dev->commac);
302 netif_tx_disable(dev->ndev);
303 }
304
305 static inline void emac_netif_start(struct emac_instance *dev)
306 {
307 netif_tx_lock_bh(dev->ndev);
308 dev->no_mcast = 0;
309 if (dev->mcast_pending && netif_running(dev->ndev))
310 __emac_set_multicast_list(dev);
311 netif_tx_unlock_bh(dev->ndev);
312
313 netif_wake_queue(dev->ndev);
314
315 /* NOTE: unconditional netif_wake_queue is only appropriate
316 * so long as all callers are assured to have free tx slots
317 * (taken from tg3... though the case where that is wrong is
318 * not terribly harmful)
319 */
320 mal_poll_enable(dev->mal, &dev->commac);
321 }
322
323 static inline void emac_rx_disable_async(struct emac_instance *dev)
324 {
325 struct emac_regs __iomem *p = dev->emacp;
326 u32 r;
327
328 DBG(dev, "rx_disable_async" NL);
329
330 r = in_be32(&p->mr0);
331 if (r & EMAC_MR0_RXE)
332 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
333 }
334
335 static int emac_reset(struct emac_instance *dev)
336 {
337 struct emac_regs __iomem *p = dev->emacp;
338 int n = 20;
339
340 DBG(dev, "reset" NL);
341
342 if (!dev->reset_failed) {
343 /* A 40x erratum suggests stopping the RX channel before reset;
344 * we stop TX as well
345 */
346 emac_rx_disable(dev);
347 emac_tx_disable(dev);
348 }
349
350 out_be32(&p->mr0, EMAC_MR0_SRST);
351 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
352 --n;
353
354 if (n) {
355 dev->reset_failed = 0;
356 return 0;
357 } else {
358 emac_report_timeout_error(dev, "reset timeout");
359 dev->reset_failed = 1;
360 return -ETIMEDOUT;
361 }
362 }
363
364 static void emac_hash_mc(struct emac_instance *dev)
365 {
366 const int regs = EMAC_XAHT_REGS(dev);
367 u32 *gaht_base = emac_gaht_base(dev);
368 u32 gaht_temp[regs];
369 struct dev_mc_list *dmi;
370 int i;
371
372 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
373
374 memset(gaht_temp, 0, sizeof (gaht_temp));
375
376 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
377 int slot, reg, mask;
378 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
379 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
380 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
381
382 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
383 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
384 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
385
386 gaht_temp[reg] |= mask;
387 }
388
389 for (i = 0; i < regs; i++)
390 out_be32(gaht_base + i, gaht_temp[i]);
391 }
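
/* A worked example of the parameterized EMAC_XAHT_* lookups, assuming the
 * classic 4 x 16-bit GAHT layout (64 slots). The authoritative bit
 * ordering is defined by the macros in core.h; treat this as illustration:
 *   crc  = ether_crc(ETH_ALEN, addr);   32-bit CRC of the MAC address
 *   slot = crc >> (32 - 6);             top 6 CRC bits -> slot 0..63
 *   reg  = slot >> 4;                   one of the 4 GAHT registers
 *   mask = 0x8000 >> (slot & 15);       one bit within that register
 * Setting gaht_temp[reg] |= mask only means "might be ours": the hash is
 * lossy, so unrelated group addresses can share a slot and the stack must
 * still filter packets for groups we never joined.
 */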
392
393 static inline u32 emac_iff2rmr(struct net_device *ndev)
394 {
395 struct emac_instance *dev = netdev_priv(ndev);
396 u32 r;
397
398 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
399
400 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
401 r |= EMAC4_RMR_BASE;
402 else
403 r |= EMAC_RMR_BASE;
404
405 if (ndev->flags & IFF_PROMISC)
406 r |= EMAC_RMR_PME;
407 else if (ndev->flags & IFF_ALLMULTI ||
408 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
409 r |= EMAC_RMR_PMME;
410 else if (ndev->mc_count > 0)
411 r |= EMAC_RMR_MAE;
412
413 return r;
414 }
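
/* Summary of the match-mode ladder above (the slot count varies by EMAC
 * variant via EMAC_XAHT_SLOTS()):
 *   IFF_PROMISC                             -> EMAC_RMR_PME  (everything)
 *   IFF_ALLMULTI, or mc_count > hash slots  -> EMAC_RMR_PMME (all multicast)
 *   0 < mc_count <= hash slots              -> EMAC_RMR_MAE  (hash match)
 *   mc_count == 0                           -> individual/broadcast only
 */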
415
416 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
417 {
418 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
419
420 DBG2(dev, "__emac_calc_base_mr1" NL);
421
422 switch(tx_size) {
423 case 2048:
424 ret |= EMAC_MR1_TFS_2K;
425 break;
426 default:
427 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
428 dev->ndev->name, tx_size);
429 }
430
431 switch(rx_size) {
432 case 16384:
433 ret |= EMAC_MR1_RFS_16K;
434 break;
435 case 4096:
436 ret |= EMAC_MR1_RFS_4K;
437 break;
438 default:
439 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
440 dev->ndev->name, rx_size);
441 }
442
443 return ret;
444 }
445
446 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
447 {
448 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
449 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
450
451 DBG2(dev, "__emac4_calc_base_mr1" NL);
452
453 switch(tx_size) {
454 case 4096:
455 ret |= EMAC4_MR1_TFS_4K;
456 break;
457 case 2048:
458 ret |= EMAC4_MR1_TFS_2K;
459 break;
460 default:
461 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
462 dev->ndev->name, tx_size);
463 }
464
465 switch(rx_size) {
466 case 16384:
467 ret |= EMAC4_MR1_RFS_16K;
468 break;
469 case 4096:
470 ret |= EMAC4_MR1_RFS_4K;
471 break;
472 case 2048:
473 ret |= EMAC4_MR1_RFS_2K;
474 break;
475 default:
476 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
477 dev->ndev->name, rx_size);
478 }
479
480 return ret;
481 }
482
483 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
484 {
485 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
486 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
487 __emac_calc_base_mr1(dev, tx_size, rx_size);
488 }
489
490 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
491 {
492 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
493 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
494 else
495 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
496 }
497
498 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
499 unsigned int low, unsigned int high)
500 {
501 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
502 return (low << 22) | ( (high & 0x3ff) << 6);
503 else
504 return (low << 23) | ( (high & 0x1ff) << 7);
505 }
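
/* A worked example with assumed numbers, matching the divisions
 * emac_configure() performs below for a 4096-byte RX FIFO and 16-byte
 * FIFO entries:
 *   low  = 4096 / 8 / 16 = 32 entries  (low-water mark)
 *   high = 4096 / 4 / 16 = 64 entries  (high-water mark, where PAUSE
 *                                       frames start being sent)
 *   EMAC4:   RWMR = (32 << 22) | (64 << 6)
 *   classic: RWMR = (32 << 23) | (64 << 7)
 * Likewise a 2048-byte TX request threshold gives
 * TRTR = ((2048 >> 6) - 1) = 31, shifted by the variant's TRTR shift.
 */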
506
507 static int emac_configure(struct emac_instance *dev)
508 {
509 struct emac_regs __iomem *p = dev->emacp;
510 struct net_device *ndev = dev->ndev;
511 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
512 u32 r, mr1 = 0;
513
514 DBG(dev, "configure" NL);
515
516 if (!link) {
517 out_be32(&p->mr1, in_be32(&p->mr1)
518 | EMAC_MR1_FDE | EMAC_MR1_ILE);
519 udelay(100);
520 } else if (emac_reset(dev) < 0)
521 return -ETIMEDOUT;
522
523 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
524 tah_reset(dev->tah_dev);
525
526 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
527 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
528
529 /* Default fifo sizes */
530 tx_size = dev->tx_fifo_size;
531 rx_size = dev->rx_fifo_size;
532
533 /* No link, force loopback */
534 if (!link)
535 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
536
537 /* Check for full duplex */
538 else if (dev->phy.duplex == DUPLEX_FULL)
539 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
540
541 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
542 dev->stop_timeout = STOP_TIMEOUT_10;
543 switch (dev->phy.speed) {
544 case SPEED_1000:
545 if (emac_phy_gpcs(dev->phy.mode)) {
546 mr1 |= EMAC_MR1_MF_1000GPCS |
547 EMAC_MR1_MF_IPPA(dev->phy.address);
548
549 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
550 * identify this GPCS PHY later.
551 */
552 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
553 } else
554 mr1 |= EMAC_MR1_MF_1000;
555
556 /* Extended fifo sizes */
557 tx_size = dev->tx_fifo_size_gige;
558 rx_size = dev->rx_fifo_size_gige;
559
560 if (dev->ndev->mtu > ETH_DATA_LEN) {
561 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
562 mr1 |= EMAC4_MR1_JPSM;
563 else
564 mr1 |= EMAC_MR1_JPSM;
565 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
566 } else
567 dev->stop_timeout = STOP_TIMEOUT_1000;
568 break;
569 case SPEED_100:
570 mr1 |= EMAC_MR1_MF_100;
571 dev->stop_timeout = STOP_TIMEOUT_100;
572 break;
573 default: /* make gcc happy */
574 break;
575 }
576
577 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
578 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
579 dev->phy.speed);
580 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
581 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
582
583 /* An erratum on 40x forces us to NOT use integrated flow control,
584 * let's hope it works on 44x ;)
585 */
586 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
587 dev->phy.duplex == DUPLEX_FULL) {
588 if (dev->phy.pause)
589 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
590 else if (dev->phy.asym_pause)
591 mr1 |= EMAC_MR1_APP;
592 }
593
594 /* Add base settings & fifo sizes & program MR1 */
595 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
596 out_be32(&p->mr1, mr1);
597
598 /* Set individual MAC address */
599 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
600 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
601 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
602 ndev->dev_addr[5]);
603
604 /* VLAN Tag Protocol ID */
605 out_be32(&p->vtpid, 0x8100);
606
607 /* Receive mode register */
608 r = emac_iff2rmr(ndev);
609 if (r & EMAC_RMR_MAE)
610 emac_hash_mc(dev);
611 out_be32(&p->rmr, r);
612
613 /* FIFOs thresholds */
614 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
615 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
616 tx_size / 2 / dev->fifo_entry_size);
617 else
618 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
619 tx_size / 2 / dev->fifo_entry_size);
620 out_be32(&p->tmr1, r);
621 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
622
623 /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
624 there should still be enough space in the FIFO to allow our link
625 partner time to process this frame and also time to send PAUSE
626 frame itself.
627
628 Here is the worst case scenario for the RX FIFO "headroom"
629 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
630
631 1) One maximum-length frame on TX 1522 bytes
632 2) One PAUSE frame time 64 bytes
633 3) PAUSE frame decode time allowance 64 bytes
634 4) One maximum-length frame on RX 1522 bytes
635 5) Round-trip propagation delay of the link (100Mb) 15 bytes
636 ----------
637 3187 bytes
638
639 I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
640 and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
641 */
642 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
643 rx_size / 4 / dev->fifo_entry_size);
644 out_be32(&p->rwmr, r);
645
646 /* Set PAUSE timer to the maximum */
647 out_be32(&p->ptr, 0xffff);
648
649 /* IRQ sources */
650 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
651 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
652 EMAC_ISR_IRE | EMAC_ISR_TE;
653 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
654 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
655 EMAC4_ISR_RXOE | */;
656 out_be32(&p->iser, r);
657
658 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
659 if (emac_phy_gpcs(dev->phy.mode))
660 emac_mii_reset_phy(&dev->phy);
661
662 /* Required for Pause packet support in EMAC */
663 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
664
665 return 0;
666 }
667
668 static void emac_reinitialize(struct emac_instance *dev)
669 {
670 DBG(dev, "reinitialize" NL);
671
672 emac_netif_stop(dev);
673 if (!emac_configure(dev)) {
674 emac_tx_enable(dev);
675 emac_rx_enable(dev);
676 }
677 emac_netif_start(dev);
678 }
679
680 static void emac_full_tx_reset(struct emac_instance *dev)
681 {
682 DBG(dev, "full_tx_reset" NL);
683
684 emac_tx_disable(dev);
685 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
686 emac_clean_tx_ring(dev);
687 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
688
689 emac_configure(dev);
690
691 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
692 emac_tx_enable(dev);
693 emac_rx_enable(dev);
694 }
695
696 static void emac_reset_work(struct work_struct *work)
697 {
698 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
699
700 DBG(dev, "reset_work" NL);
701
702 mutex_lock(&dev->link_lock);
703 if (dev->opened) {
704 emac_netif_stop(dev);
705 emac_full_tx_reset(dev);
706 emac_netif_start(dev);
707 }
708 mutex_unlock(&dev->link_lock);
709 }
710
711 static void emac_tx_timeout(struct net_device *ndev)
712 {
713 struct emac_instance *dev = netdev_priv(ndev);
714
715 DBG(dev, "tx_timeout" NL);
716
717 schedule_work(&dev->reset_work);
718 }
719
720
721 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
722 {
723 int done = !!(stacr & EMAC_STACR_OC);
724
725 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
726 done = !done;
727
728 return done;
729 };
730
731 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
732 {
733 struct emac_regs __iomem *p = dev->emacp;
734 u32 r = 0;
735 int n, err = -ETIMEDOUT;
736
737 mutex_lock(&dev->mdio_lock);
738
739 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
740
741 /* Enable proper MDIO port */
742 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
743 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
744 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
745 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
746
747 /* Wait for management interface to become idle */
748 n = 20;
749 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
750 udelay(1);
751 if (!--n) {
752 DBG2(dev, " -> timeout wait idle\n");
753 goto bail;
754 }
755 }
756
757 /* Issue read command */
758 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
759 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
760 else
761 r = EMAC_STACR_BASE(dev->opb_bus_freq);
762 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
763 r |= EMAC_STACR_OC;
764 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
765 r |= EMACX_STACR_STAC_READ;
766 else
767 r |= EMAC_STACR_STAC_READ;
768 r |= (reg & EMAC_STACR_PRA_MASK)
769 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
770 out_be32(&p->stacr, r);
771
772 /* Wait for read to complete */
773 n = 200;
774 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
775 udelay(1);
776 if (!--n) {
777 DBG2(dev, " -> timeout wait complete\n");
778 goto bail;
779 }
780 }
781
782 if (unlikely(r & EMAC_STACR_PHYE)) {
783 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
784 err = -EREMOTEIO;
785 goto bail;
786 }
787
788 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
789
790 DBG2(dev, "mdio_read -> %04x" NL, r);
791 err = 0;
792 bail:
793 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
794 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
795 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
796 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
797 mutex_unlock(&dev->mdio_lock);
798
799 return err == 0 ? r : err;
800 }
801
802 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
803 u16 val)
804 {
805 struct emac_regs __iomem *p = dev->emacp;
806 u32 r = 0;
807 int n, err = -ETIMEDOUT;
808
809 mutex_lock(&dev->mdio_lock);
810
811 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
812
813 /* Enable proper MDIO port */
814 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
815 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
816 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
817 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
818
819 /* Wait for management interface to be idle */
820 n = 20;
821 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
822 udelay(1);
823 if (!--n) {
824 DBG2(dev, " -> timeout wait idle\n");
825 goto bail;
826 }
827 }
828
829 /* Issue write command */
830 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
831 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
832 else
833 r = EMAC_STACR_BASE(dev->opb_bus_freq);
834 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
835 r |= EMAC_STACR_OC;
836 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
837 r |= EMACX_STACR_STAC_WRITE;
838 else
839 r |= EMAC_STACR_STAC_WRITE;
840 r |= (reg & EMAC_STACR_PRA_MASK) |
841 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
842 (val << EMAC_STACR_PHYD_SHIFT);
843 out_be32(&p->stacr, r);
844
845 /* Wait for write to complete */
846 n = 200;
847 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
848 udelay(1);
849 if (!--n) {
850 DBG2(dev, " -> timeout wait complete\n");
851 goto bail;
852 }
853 }
854 err = 0;
855 bail:
856 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
857 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
858 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
859 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
860 mutex_unlock(&dev->mdio_lock);
861 }
862
863 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
864 {
865 struct emac_instance *dev = netdev_priv(ndev);
866 int res;
867
868 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
869 (u8) id, (u8) reg);
870 return res;
871 }
872
873 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
874 {
875 struct emac_instance *dev = netdev_priv(ndev);
876
877 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
878 (u8) id, (u8) reg, (u16) val);
879 }
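
/* These wrappers are what the MII layer is wired to. A minimal
 * illustrative use (invented function name; PHY address assumed), relying
 * on the standard MII_BMSR/BMSR_LSTATUS definitions from <linux/mii.h>:
 */
static int example_phy_link_up(struct net_device *ndev, int phy_addr)
{
	int bmsr = emac_mdio_read(ndev, phy_addr, MII_BMSR);

	/* negative values are -ETIMEDOUT/-EREMOTEIO from the helpers above */
	return bmsr >= 0 && (bmsr & BMSR_LSTATUS);
}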
880
881 /* Tx lock BH */
882 static void __emac_set_multicast_list(struct emac_instance *dev)
883 {
884 struct emac_regs __iomem *p = dev->emacp;
885 u32 rmr = emac_iff2rmr(dev->ndev);
886
887 DBG(dev, "__multicast %08x" NL, rmr);
888
889 /* I decided to relax register access rules here to avoid
890 * full EMAC reset.
891 *
892 * There is a real problem with EMAC4 core if we use MWSW_001 bit
893 * in MR1 register and do a full EMAC reset.
894 * One TX BD status update is delayed and, after EMAC reset, it
895 * never happens, resulting in a TX hang (it'll be recovered by the TX
896 * timeout handler eventually, but this is just gross).
897 * So we either have to do full TX reset or try to cheat here :)
898 *
899 * The only required change is to RX mode register, so I *think* all
900 * we need is just to stop RX channel. This seems to work on all
901 * tested SoCs. --ebs
902 *
903 * If we need the full reset, we might just trigger the workqueue
904 * and do it async... a bit nasty but should work --BenH
905 */
906 dev->mcast_pending = 0;
907 emac_rx_disable(dev);
908 if (rmr & EMAC_RMR_MAE)
909 emac_hash_mc(dev);
910 out_be32(&p->rmr, rmr);
911 emac_rx_enable(dev);
912 }
913
914 /* Tx lock BH */
915 static void emac_set_multicast_list(struct net_device *ndev)
916 {
917 struct emac_instance *dev = netdev_priv(ndev);
918
919 DBG(dev, "multicast" NL);
920
921 BUG_ON(!netif_running(dev->ndev));
922
923 if (dev->no_mcast) {
924 dev->mcast_pending = 1;
925 return;
926 }
927 __emac_set_multicast_list(dev);
928 }
929
930 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
931 {
932 int rx_sync_size = emac_rx_sync_size(new_mtu);
933 int rx_skb_size = emac_rx_skb_size(new_mtu);
934 int i, ret = 0;
935
936 mutex_lock(&dev->link_lock);
937 emac_netif_stop(dev);
938 emac_rx_disable(dev);
939 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
940
941 if (dev->rx_sg_skb) {
942 ++dev->estats.rx_dropped_resize;
943 dev_kfree_skb(dev->rx_sg_skb);
944 dev->rx_sg_skb = NULL;
945 }
946
947 /* Make a first pass over RX ring and mark BDs ready, dropping
948 * non-processed packets on the way. We need this as a separate pass
949 * to simplify error recovery in the case of allocation failure later.
950 */
951 for (i = 0; i < NUM_RX_BUFF; ++i) {
952 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
953 ++dev->estats.rx_dropped_resize;
954
955 dev->rx_desc[i].data_len = 0;
956 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
957 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
958 }
959
960 /* Reallocate RX ring only if bigger skb buffers are required */
961 if (rx_skb_size <= dev->rx_skb_size)
962 goto skip;
963
964 /* Second pass, allocate new skbs */
965 for (i = 0; i < NUM_RX_BUFF; ++i) {
966 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
967 if (!skb) {
968 ret = -ENOMEM;
969 goto oom;
970 }
971
972 BUG_ON(!dev->rx_skb[i]);
973 dev_kfree_skb(dev->rx_skb[i]);
974
975 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
976 dev->rx_desc[i].data_ptr =
977 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
978 DMA_FROM_DEVICE) + 2;
979 dev->rx_skb[i] = skb;
980 }
981 skip:
982 /* Check if we need to change "Jumbo" bit in MR1 */
983 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
984 /* This is to prevent starting RX channel in emac_rx_enable() */
985 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
986
987 dev->ndev->mtu = new_mtu;
988 emac_full_tx_reset(dev);
989 }
990
991 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
992 oom:
993 /* Restart RX */
994 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
995 dev->rx_slot = 0;
996 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
997 emac_rx_enable(dev);
998 emac_netif_start(dev);
999 mutex_unlock(&dev->link_lock);
1000
1001 return ret;
1002 }
1003
1004 /* Process ctx, rtnl_lock semaphore */
1005 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1006 {
1007 struct emac_instance *dev = netdev_priv(ndev);
1008 int ret = 0;
1009
1010 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1011 return -EINVAL;
1012
1013 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1014
1015 if (netif_running(ndev)) {
1016 /* Check if we really need to reinitialize the RX ring */
1017 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1018 ret = emac_resize_rx_ring(dev, new_mtu);
1019 }
1020
1021 if (!ret) {
1022 ndev->mtu = new_mtu;
1023 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1024 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1025 }
1026
1027 return ret;
1028 }
1029
1030 static void emac_clean_tx_ring(struct emac_instance *dev)
1031 {
1032 int i;
1033
1034 for (i = 0; i < NUM_TX_BUFF; ++i) {
1035 if (dev->tx_skb[i]) {
1036 dev_kfree_skb(dev->tx_skb[i]);
1037 dev->tx_skb[i] = NULL;
1038 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1039 ++dev->estats.tx_dropped;
1040 }
1041 dev->tx_desc[i].ctrl = 0;
1042 dev->tx_desc[i].data_ptr = 0;
1043 }
1044 }
1045
1046 static void emac_clean_rx_ring(struct emac_instance *dev)
1047 {
1048 int i;
1049
1050 for (i = 0; i < NUM_RX_BUFF; ++i)
1051 if (dev->rx_skb[i]) {
1052 dev->rx_desc[i].ctrl = 0;
1053 dev_kfree_skb(dev->rx_skb[i]);
1054 dev->rx_skb[i] = NULL;
1055 dev->rx_desc[i].data_ptr = 0;
1056 }
1057
1058 if (dev->rx_sg_skb) {
1059 dev_kfree_skb(dev->rx_sg_skb);
1060 dev->rx_sg_skb = NULL;
1061 }
1062 }
1063
1064 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1065 gfp_t flags)
1066 {
1067 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1068 if (unlikely(!skb))
1069 return -ENOMEM;
1070
1071 dev->rx_skb[slot] = skb;
1072 dev->rx_desc[slot].data_len = 0;
1073
1074 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1075 dev->rx_desc[slot].data_ptr =
1076 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1077 DMA_FROM_DEVICE) + 2;
1078 wmb();
1079 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1080 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1081
1082 return 0;
1083 }
1084
1085 static void emac_print_link_status(struct emac_instance *dev)
1086 {
1087 if (netif_carrier_ok(dev->ndev))
1088 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1089 dev->ndev->name, dev->phy.speed,
1090 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1091 dev->phy.pause ? ", pause enabled" :
1092 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1093 else
1094 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1095 }
1096
1097 /* Process ctx, rtnl_lock semaphore */
1098 static int emac_open(struct net_device *ndev)
1099 {
1100 struct emac_instance *dev = netdev_priv(ndev);
1101 int err, i;
1102
1103 DBG(dev, "open" NL);
1104
1105 /* Setup error IRQ handler */
1106 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1107 if (err) {
1108 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1109 ndev->name, dev->emac_irq);
1110 return err;
1111 }
1112
1113 /* Allocate RX ring */
1114 for (i = 0; i < NUM_RX_BUFF; ++i)
1115 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1116 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1117 ndev->name);
1118 goto oom;
1119 }
1120
1121 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1122 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1123 dev->rx_sg_skb = NULL;
1124
1125 mutex_lock(&dev->link_lock);
1126 dev->opened = 1;
1127
1128 /* Start PHY polling now.
1129 */
1130 if (dev->phy.address >= 0) {
1131 int link_poll_interval;
1132 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1133 dev->phy.def->ops->read_link(&dev->phy);
1134 emac_rx_clk_default(dev);
1135 netif_carrier_on(dev->ndev);
1136 link_poll_interval = PHY_POLL_LINK_ON;
1137 } else {
1138 emac_rx_clk_tx(dev);
1139 netif_carrier_off(dev->ndev);
1140 link_poll_interval = PHY_POLL_LINK_OFF;
1141 }
1142 dev->link_polling = 1;
1143 wmb();
1144 schedule_delayed_work(&dev->link_work, link_poll_interval);
1145 emac_print_link_status(dev);
1146 } else
1147 netif_carrier_on(dev->ndev);
1148
1149 emac_configure(dev);
1150 mal_poll_add(dev->mal, &dev->commac);
1151 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1152 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1153 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1154 emac_tx_enable(dev);
1155 emac_rx_enable(dev);
1156 emac_netif_start(dev);
1157
1158 mutex_unlock(&dev->link_lock);
1159
1160 return 0;
1161 oom:
1162 emac_clean_rx_ring(dev);
1163 free_irq(dev->emac_irq, dev);
1164
1165 return -ENOMEM;
1166 }
1167
1168 /* BHs disabled */
1169 #if 0
1170 static int emac_link_differs(struct emac_instance *dev)
1171 {
1172 u32 r = in_be32(&dev->emacp->mr1);
1173
1174 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1175 int speed, pause, asym_pause;
1176
1177 if (r & EMAC_MR1_MF_1000)
1178 speed = SPEED_1000;
1179 else if (r & EMAC_MR1_MF_100)
1180 speed = SPEED_100;
1181 else
1182 speed = SPEED_10;
1183
1184 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1185 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1186 pause = 1;
1187 asym_pause = 0;
1188 break;
1189 case EMAC_MR1_APP:
1190 pause = 0;
1191 asym_pause = 1;
1192 break;
1193 default:
1194 pause = asym_pause = 0;
1195 }
1196 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1197 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1198 }
1199 #endif
1200
1201 static void emac_link_timer(struct work_struct *work)
1202 {
1203 struct emac_instance *dev =
1204 container_of((struct delayed_work *)work,
1205 struct emac_instance, link_work);
1206 int link_poll_interval;
1207
1208 mutex_lock(&dev->link_lock);
1209 DBG2(dev, "link timer" NL);
1210
1211 if (!dev->opened)
1212 goto bail;
1213
1214 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1215 if (!netif_carrier_ok(dev->ndev)) {
1216 emac_rx_clk_default(dev);
1217 /* Get new link parameters */
1218 dev->phy.def->ops->read_link(&dev->phy);
1219
1220 netif_carrier_on(dev->ndev);
1221 emac_netif_stop(dev);
1222 emac_full_tx_reset(dev);
1223 emac_netif_start(dev);
1224 emac_print_link_status(dev);
1225 }
1226 link_poll_interval = PHY_POLL_LINK_ON;
1227 } else {
1228 if (netif_carrier_ok(dev->ndev)) {
1229 emac_rx_clk_tx(dev);
1230 netif_carrier_off(dev->ndev);
1231 netif_tx_disable(dev->ndev);
1232 emac_reinitialize(dev);
1233 emac_print_link_status(dev);
1234 }
1235 link_poll_interval = PHY_POLL_LINK_OFF;
1236 }
1237 schedule_delayed_work(&dev->link_work, link_poll_interval);
1238 bail:
1239 mutex_unlock(&dev->link_lock);
1240 }
1241
1242 static void emac_force_link_update(struct emac_instance *dev)
1243 {
1244 netif_carrier_off(dev->ndev);
1245 smp_rmb();
1246 if (dev->link_polling) {
1247 cancel_rearming_delayed_work(&dev->link_work);
1248 if (dev->link_polling)
1249 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1250 }
1251 }
1252
1253 /* Process ctx, rtnl_lock semaphore */
1254 static int emac_close(struct net_device *ndev)
1255 {
1256 struct emac_instance *dev = netdev_priv(ndev);
1257
1258 DBG(dev, "close" NL);
1259
1260 if (dev->phy.address >= 0) {
1261 dev->link_polling = 0;
1262 cancel_rearming_delayed_work(&dev->link_work);
1263 }
1264 mutex_lock(&dev->link_lock);
1265 emac_netif_stop(dev);
1266 dev->opened = 0;
1267 mutex_unlock(&dev->link_lock);
1268
1269 emac_rx_disable(dev);
1270 emac_tx_disable(dev);
1271 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1272 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1273 mal_poll_del(dev->mal, &dev->commac);
1274
1275 emac_clean_tx_ring(dev);
1276 emac_clean_rx_ring(dev);
1277
1278 free_irq(dev->emac_irq, dev);
1279
1280 return 0;
1281 }
1282
1283 static inline u16 emac_tx_csum(struct emac_instance *dev,
1284 struct sk_buff *skb)
1285 {
1286 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1287 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1288 ++dev->stats.tx_packets_csum;
1289 return EMAC_TX_CTRL_TAH_CSUM;
1290 }
1291 return 0;
1292 }
1293
1294 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1295 {
1296 struct emac_regs __iomem *p = dev->emacp;
1297 struct net_device *ndev = dev->ndev;
1298
1299 /* Send the packet out. If this branch makes a significant perf
1300 * difference, then we can store the TMR0 value in "dev"
1301 * instead
1302 */
1303 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1304 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1305 else
1306 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1307
1308 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1309 netif_stop_queue(ndev);
1310 DBG2(dev, "stopped TX queue" NL);
1311 }
1312
1313 ndev->trans_start = jiffies;
1314 ++dev->stats.tx_packets;
1315 dev->stats.tx_bytes += len;
1316
1317 return 0;
1318 }
1319
1320 /* Tx lock BH */
1321 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1322 {
1323 struct emac_instance *dev = netdev_priv(ndev);
1324 unsigned int len = skb->len;
1325 int slot;
1326
1327 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1328 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1329
1330 slot = dev->tx_slot++;
1331 if (dev->tx_slot == NUM_TX_BUFF) {
1332 dev->tx_slot = 0;
1333 ctrl |= MAL_TX_CTRL_WRAP;
1334 }
1335
1336 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1337
1338 dev->tx_skb[slot] = skb;
1339 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1340 skb->data, len,
1341 DMA_TO_DEVICE);
1342 dev->tx_desc[slot].data_len = (u16) len;
1343 wmb();
1344 dev->tx_desc[slot].ctrl = ctrl;
1345
1346 return emac_xmit_finish(dev, len);
1347 }
1348
1349 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1350 u32 pd, int len, int last, u16 base_ctrl)
1351 {
1352 while (1) {
1353 u16 ctrl = base_ctrl;
1354 int chunk = min(len, MAL_MAX_TX_SIZE);
1355 len -= chunk;
1356
1357 slot = (slot + 1) % NUM_TX_BUFF;
1358
1359 if (last && !len)
1360 ctrl |= MAL_TX_CTRL_LAST;
1361 if (slot == NUM_TX_BUFF - 1)
1362 ctrl |= MAL_TX_CTRL_WRAP;
1363
1364 dev->tx_skb[slot] = NULL;
1365 dev->tx_desc[slot].data_ptr = pd;
1366 dev->tx_desc[slot].data_len = (u16) chunk;
1367 dev->tx_desc[slot].ctrl = ctrl;
1368 ++dev->tx_cnt;
1369
1370 if (!len)
1371 break;
1372
1373 pd += chunk;
1374 }
1375 return slot;
1376 }
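
/* A concrete walk-through, assuming MAL_MAX_TX_SIZE is 4096 for
 * illustration: a 10000-byte DMA mapping handed to emac_xmit_split()
 * becomes three consecutive BDs of 4096 + 4096 + 1808 bytes. Only the
 * final chunk may carry MAL_TX_CTRL_LAST, and whichever chunk lands in
 * slot NUM_TX_BUFF - 1 also gets MAL_TX_CTRL_WRAP. The caller attaches
 * the skb to the *last* slot used, so it is not freed until every chunk
 * has gone out.
 */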
1377
1378 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1379 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1380 {
1381 struct emac_instance *dev = netdev_priv(ndev);
1382 int nr_frags = skb_shinfo(skb)->nr_frags;
1383 int len = skb->len, chunk;
1384 int slot, i;
1385 u16 ctrl;
1386 u32 pd;
1387
1388 /* This is the common "fast" path */
1389 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1390 return emac_start_xmit(skb, ndev);
1391
1392 len -= skb->data_len;
1393
1394 /* Note, this is only an *estimate*; we can still run out of empty
1395 * slots because of the additional fragmentation into
1396 * MAL_MAX_TX_SIZE-sized chunks
1397 */
1398 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1399 goto stop_queue;
1400
1401 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1402 emac_tx_csum(dev, skb);
1403 slot = dev->tx_slot;
1404
1405 /* skb data */
1406 dev->tx_skb[slot] = NULL;
1407 chunk = min(len, MAL_MAX_TX_SIZE);
1408 dev->tx_desc[slot].data_ptr = pd =
1409 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1410 dev->tx_desc[slot].data_len = (u16) chunk;
1411 len -= chunk;
1412 if (unlikely(len))
1413 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1414 ctrl);
1415 /* skb fragments */
1416 for (i = 0; i < nr_frags; ++i) {
1417 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1418 len = frag->size;
1419
1420 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1421 goto undo_frame;
1422
1423 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1424 DMA_TO_DEVICE);
1425
1426 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1427 ctrl);
1428 }
1429
1430 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1431
1432 /* Attach skb to the last slot so we don't release it too early */
1433 dev->tx_skb[slot] = skb;
1434
1435 /* Send the packet out */
1436 if (dev->tx_slot == NUM_TX_BUFF - 1)
1437 ctrl |= MAL_TX_CTRL_WRAP;
1438 wmb();
1439 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1440 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1441
1442 return emac_xmit_finish(dev, skb->len);
1443
1444 undo_frame:
1445 /* Well, too bad. Our previous estimate was overly optimistic.
1446 * Undo everything.
1447 */
1448 while (slot != dev->tx_slot) {
1449 dev->tx_desc[slot].ctrl = 0;
1450 --dev->tx_cnt;
1451 if (--slot < 0)
1452 slot = NUM_TX_BUFF - 1;
1453 }
1454 ++dev->estats.tx_undo;
1455
1456 stop_queue:
1457 netif_stop_queue(ndev);
1458 DBG2(dev, "stopped TX queue" NL);
1459 return 1;
1460 }
1461
1462 /* Tx lock BHs */
1463 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1464 {
1465 struct emac_error_stats *st = &dev->estats;
1466
1467 DBG(dev, "BD TX error %04x" NL, ctrl);
1468
1469 ++st->tx_bd_errors;
1470 if (ctrl & EMAC_TX_ST_BFCS)
1471 ++st->tx_bd_bad_fcs;
1472 if (ctrl & EMAC_TX_ST_LCS)
1473 ++st->tx_bd_carrier_loss;
1474 if (ctrl & EMAC_TX_ST_ED)
1475 ++st->tx_bd_excessive_deferral;
1476 if (ctrl & EMAC_TX_ST_EC)
1477 ++st->tx_bd_excessive_collisions;
1478 if (ctrl & EMAC_TX_ST_LC)
1479 ++st->tx_bd_late_collision;
1480 if (ctrl & EMAC_TX_ST_MC)
1481 ++st->tx_bd_multple_collisions;
1482 if (ctrl & EMAC_TX_ST_SC)
1483 ++st->tx_bd_single_collision;
1484 if (ctrl & EMAC_TX_ST_UR)
1485 ++st->tx_bd_underrun;
1486 if (ctrl & EMAC_TX_ST_SQE)
1487 ++st->tx_bd_sqe;
1488 }
1489
1490 static void emac_poll_tx(void *param)
1491 {
1492 struct emac_instance *dev = param;
1493 u32 bad_mask;
1494
1495 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1496
1497 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1498 bad_mask = EMAC_IS_BAD_TX_TAH;
1499 else
1500 bad_mask = EMAC_IS_BAD_TX;
1501
1502 netif_tx_lock_bh(dev->ndev);
1503 if (dev->tx_cnt) {
1504 u16 ctrl;
1505 int slot = dev->ack_slot, n = 0;
1506 again:
1507 ctrl = dev->tx_desc[slot].ctrl;
1508 if (!(ctrl & MAL_TX_CTRL_READY)) {
1509 struct sk_buff *skb = dev->tx_skb[slot];
1510 ++n;
1511
1512 if (skb) {
1513 dev_kfree_skb(skb);
1514 dev->tx_skb[slot] = NULL;
1515 }
1516 slot = (slot + 1) % NUM_TX_BUFF;
1517
1518 if (unlikely(ctrl & bad_mask))
1519 emac_parse_tx_error(dev, ctrl);
1520
1521 if (--dev->tx_cnt)
1522 goto again;
1523 }
1524 if (n) {
1525 dev->ack_slot = slot;
1526 if (netif_queue_stopped(dev->ndev) &&
1527 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1528 netif_wake_queue(dev->ndev);
1529
1530 DBG2(dev, "tx %d pkts" NL, n);
1531 }
1532 }
1533 netif_tx_unlock_bh(dev->ndev);
1534 }
1535
1536 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1537 int len)
1538 {
1539 struct sk_buff *skb = dev->rx_skb[slot];
1540
1541 DBG2(dev, "recycle %d %d" NL, slot, len);
1542
1543 if (len)
1544 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1545 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1546
1547 dev->rx_desc[slot].data_len = 0;
1548 wmb();
1549 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1550 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1551 }
1552
1553 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1554 {
1555 struct emac_error_stats *st = &dev->estats;
1556
1557 DBG(dev, "BD RX error %04x" NL, ctrl);
1558
1559 ++st->rx_bd_errors;
1560 if (ctrl & EMAC_RX_ST_OE)
1561 ++st->rx_bd_overrun;
1562 if (ctrl & EMAC_RX_ST_BP)
1563 ++st->rx_bd_bad_packet;
1564 if (ctrl & EMAC_RX_ST_RP)
1565 ++st->rx_bd_runt_packet;
1566 if (ctrl & EMAC_RX_ST_SE)
1567 ++st->rx_bd_short_event;
1568 if (ctrl & EMAC_RX_ST_AE)
1569 ++st->rx_bd_alignment_error;
1570 if (ctrl & EMAC_RX_ST_BFCS)
1571 ++st->rx_bd_bad_fcs;
1572 if (ctrl & EMAC_RX_ST_PTL)
1573 ++st->rx_bd_packet_too_long;
1574 if (ctrl & EMAC_RX_ST_ORE)
1575 ++st->rx_bd_out_of_range;
1576 if (ctrl & EMAC_RX_ST_IRE)
1577 ++st->rx_bd_in_range;
1578 }
1579
1580 static inline void emac_rx_csum(struct emac_instance *dev,
1581 struct sk_buff *skb, u16 ctrl)
1582 {
1583 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1584 if (!ctrl && dev->tah_dev) {
1585 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586 ++dev->stats.rx_packets_csum;
1587 }
1588 #endif
1589 }
1590
1591 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1592 {
1593 if (likely(dev->rx_sg_skb != NULL)) {
1594 int len = dev->rx_desc[slot].data_len;
1595 int tot_len = dev->rx_sg_skb->len + len;
1596
1597 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1598 ++dev->estats.rx_dropped_mtu;
1599 dev_kfree_skb(dev->rx_sg_skb);
1600 dev->rx_sg_skb = NULL;
1601 } else {
1602 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1603 dev->rx_skb[slot]->data, len);
1604 skb_put(dev->rx_sg_skb, len);
1605 emac_recycle_rx_skb(dev, slot, len);
1606 return 0;
1607 }
1608 }
1609 emac_recycle_rx_skb(dev, slot, 0);
1610 return -1;
1611 }
1612
1613 /* NAPI poll context */
1614 static int emac_poll_rx(void *param, int budget)
1615 {
1616 struct emac_instance *dev = param;
1617 int slot = dev->rx_slot, received = 0;
1618
1619 DBG2(dev, "poll_rx(%d)" NL, budget);
1620
1621 again:
1622 while (budget > 0) {
1623 int len;
1624 struct sk_buff *skb;
1625 u16 ctrl = dev->rx_desc[slot].ctrl;
1626
1627 if (ctrl & MAL_RX_CTRL_EMPTY)
1628 break;
1629
1630 skb = dev->rx_skb[slot];
1631 mb();
1632 len = dev->rx_desc[slot].data_len;
1633
1634 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1635 goto sg;
1636
1637 ctrl &= EMAC_BAD_RX_MASK;
1638 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1639 emac_parse_rx_error(dev, ctrl);
1640 ++dev->estats.rx_dropped_error;
1641 emac_recycle_rx_skb(dev, slot, 0);
1642 len = 0;
1643 goto next;
1644 }
1645
1646 if (len && len < EMAC_RX_COPY_THRESH) {
1647 struct sk_buff *copy_skb =
1648 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1649 if (unlikely(!copy_skb))
1650 goto oom;
1651
1652 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1653 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1654 len + 2);
1655 emac_recycle_rx_skb(dev, slot, len);
1656 skb = copy_skb;
1657 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1658 goto oom;
1659
1660 skb_put(skb, len);
1661 push_packet:
1662 skb->dev = dev->ndev;
1663 skb->protocol = eth_type_trans(skb, dev->ndev);
1664 emac_rx_csum(dev, skb, ctrl);
1665
1666 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1667 ++dev->estats.rx_dropped_stack;
1668 next:
1669 ++dev->stats.rx_packets;
1670 skip:
1671 dev->stats.rx_bytes += len;
1672 slot = (slot + 1) % NUM_RX_BUFF;
1673 --budget;
1674 ++received;
1675 continue;
1676 sg:
1677 if (ctrl & MAL_RX_CTRL_FIRST) {
1678 BUG_ON(dev->rx_sg_skb);
1679 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1680 DBG(dev, "rx OOM %d" NL, slot);
1681 ++dev->estats.rx_dropped_oom;
1682 emac_recycle_rx_skb(dev, slot, 0);
1683 } else {
1684 dev->rx_sg_skb = skb;
1685 skb_put(skb, len);
1686 }
1687 } else if (!emac_rx_sg_append(dev, slot) &&
1688 (ctrl & MAL_RX_CTRL_LAST)) {
1689
1690 skb = dev->rx_sg_skb;
1691 dev->rx_sg_skb = NULL;
1692
1693 ctrl &= EMAC_BAD_RX_MASK;
1694 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1695 emac_parse_rx_error(dev, ctrl);
1696 ++dev->estats.rx_dropped_error;
1697 dev_kfree_skb(skb);
1698 len = 0;
1699 } else
1700 goto push_packet;
1701 }
1702 goto skip;
1703 oom:
1704 DBG(dev, "rx OOM %d" NL, slot);
1705 /* Drop the packet and recycle skb */
1706 ++dev->estats.rx_dropped_oom;
1707 emac_recycle_rx_skb(dev, slot, 0);
1708 goto next;
1709 }
1710
1711 if (received) {
1712 DBG2(dev, "rx %d BDs" NL, received);
1713 dev->rx_slot = slot;
1714 }
1715
1716 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1717 mb();
1718 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1719 DBG2(dev, "rx restart" NL);
1720 received = 0;
1721 goto again;
1722 }
1723
1724 if (dev->rx_sg_skb) {
1725 DBG2(dev, "dropping partial rx packet" NL);
1726 ++dev->estats.rx_dropped_error;
1727 dev_kfree_skb(dev->rx_sg_skb);
1728 dev->rx_sg_skb = NULL;
1729 }
1730
1731 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1732 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1733 emac_rx_enable(dev);
1734 dev->rx_slot = 0;
1735 }
1736 return received;
1737 }
1738
1739 /* NAPI poll context */
1740 static int emac_peek_rx(void *param)
1741 {
1742 struct emac_instance *dev = param;
1743
1744 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1745 }
1746
1747 /* NAPI poll context */
1748 static int emac_peek_rx_sg(void *param)
1749 {
1750 struct emac_instance *dev = param;
1751
1752 int slot = dev->rx_slot;
1753 while (1) {
1754 u16 ctrl = dev->rx_desc[slot].ctrl;
1755 if (ctrl & MAL_RX_CTRL_EMPTY)
1756 return 0;
1757 else if (ctrl & MAL_RX_CTRL_LAST)
1758 return 1;
1759
1760 slot = (slot + 1) % NUM_RX_BUFF;
1761
1762 /* I'm just being paranoid here :) */
1763 if (unlikely(slot == dev->rx_slot))
1764 return 0;
1765 }
1766 }
1767
1768 /* Hard IRQ */
1769 static void emac_rxde(void *param)
1770 {
1771 struct emac_instance *dev = param;
1772
1773 ++dev->estats.rx_stopped;
1774 emac_rx_disable_async(dev);
1775 }
1776
1777 /* Hard IRQ */
1778 static irqreturn_t emac_irq(int irq, void *dev_instance)
1779 {
1780 struct emac_instance *dev = dev_instance;
1781 struct emac_regs __iomem *p = dev->emacp;
1782 struct emac_error_stats *st = &dev->estats;
1783 u32 isr;
1784
1785 spin_lock(&dev->lock);
1786
1787 isr = in_be32(&p->isr);
1788 out_be32(&p->isr, isr);
1789
1790 DBG(dev, "isr = %08x" NL, isr);
1791
1792 if (isr & EMAC4_ISR_TXPE)
1793 ++st->tx_parity;
1794 if (isr & EMAC4_ISR_RXPE)
1795 ++st->rx_parity;
1796 if (isr & EMAC4_ISR_TXUE)
1797 ++st->tx_underrun;
1798 if (isr & EMAC4_ISR_RXOE)
1799 ++st->rx_fifo_overrun;
1800 if (isr & EMAC_ISR_OVR)
1801 ++st->rx_overrun;
1802 if (isr & EMAC_ISR_BP)
1803 ++st->rx_bad_packet;
1804 if (isr & EMAC_ISR_RP)
1805 ++st->rx_runt_packet;
1806 if (isr & EMAC_ISR_SE)
1807 ++st->rx_short_event;
1808 if (isr & EMAC_ISR_ALE)
1809 ++st->rx_alignment_error;
1810 if (isr & EMAC_ISR_BFCS)
1811 ++st->rx_bad_fcs;
1812 if (isr & EMAC_ISR_PTLE)
1813 ++st->rx_packet_too_long;
1814 if (isr & EMAC_ISR_ORE)
1815 ++st->rx_out_of_range;
1816 if (isr & EMAC_ISR_IRE)
1817 ++st->rx_in_range;
1818 if (isr & EMAC_ISR_SQE)
1819 ++st->tx_sqe;
1820 if (isr & EMAC_ISR_TE)
1821 ++st->tx_errors;
1822
1823 spin_unlock(&dev->lock);
1824
1825 return IRQ_HANDLED;
1826 }
1827
1828 static struct net_device_stats *emac_stats(struct net_device *ndev)
1829 {
1830 struct emac_instance *dev = netdev_priv(ndev);
1831 struct emac_stats *st = &dev->stats;
1832 struct emac_error_stats *est = &dev->estats;
1833 struct net_device_stats *nst = &dev->nstats;
1834 unsigned long flags;
1835
1836 DBG2(dev, "stats" NL);
1837
1838 /* Compute "legacy" statistics */
1839 spin_lock_irqsave(&dev->lock, flags);
1840 nst->rx_packets = (unsigned long)st->rx_packets;
1841 nst->rx_bytes = (unsigned long)st->rx_bytes;
1842 nst->tx_packets = (unsigned long)st->tx_packets;
1843 nst->tx_bytes = (unsigned long)st->tx_bytes;
1844 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1845 est->rx_dropped_error +
1846 est->rx_dropped_resize +
1847 est->rx_dropped_mtu);
1848 nst->tx_dropped = (unsigned long)est->tx_dropped;
1849
1850 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1851 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1852 est->rx_fifo_overrun +
1853 est->rx_overrun);
1854 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1855 est->rx_alignment_error);
1856 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1857 est->rx_bad_fcs);
1858 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1859 est->rx_bd_short_event +
1860 est->rx_bd_packet_too_long +
1861 est->rx_bd_out_of_range +
1862 est->rx_bd_in_range +
1863 est->rx_runt_packet +
1864 est->rx_short_event +
1865 est->rx_packet_too_long +
1866 est->rx_out_of_range +
1867 est->rx_in_range);
1868
1869 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1870 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1871 est->tx_underrun);
1872 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1873 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1874 est->tx_bd_excessive_collisions +
1875 est->tx_bd_late_collision +
1876 est->tx_bd_multple_collisions);
1877 spin_unlock_irqrestore(&dev->lock, flags);
1878 return nst;
1879 }
1880
1881 static struct mal_commac_ops emac_commac_ops = {
1882 .poll_tx = &emac_poll_tx,
1883 .poll_rx = &emac_poll_rx,
1884 .peek_rx = &emac_peek_rx,
1885 .rxde = &emac_rxde,
1886 };
1887
1888 static struct mal_commac_ops emac_commac_sg_ops = {
1889 .poll_tx = &emac_poll_tx,
1890 .poll_rx = &emac_poll_rx,
1891 .peek_rx = &emac_peek_rx_sg,
1892 .rxde = &emac_rxde,
1893 };
1894
1895 /* Ethtool support */
1896 static int emac_ethtool_get_settings(struct net_device *ndev,
1897 struct ethtool_cmd *cmd)
1898 {
1899 struct emac_instance *dev = netdev_priv(ndev);
1900
1901 cmd->supported = dev->phy.features;
1902 cmd->port = PORT_MII;
1903 cmd->phy_address = dev->phy.address;
1904 cmd->transceiver =
1905 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1906
1907 mutex_lock(&dev->link_lock);
1908 cmd->advertising = dev->phy.advertising;
1909 cmd->autoneg = dev->phy.autoneg;
1910 cmd->speed = dev->phy.speed;
1911 cmd->duplex = dev->phy.duplex;
1912 mutex_unlock(&dev->link_lock);
1913
1914 return 0;
1915 }
1916
1917 static int emac_ethtool_set_settings(struct net_device *ndev,
1918 struct ethtool_cmd *cmd)
1919 {
1920 struct emac_instance *dev = netdev_priv(ndev);
1921 u32 f = dev->phy.features;
1922
1923 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1924 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1925
1926 /* Basic sanity checks */
1927 if (dev->phy.address < 0)
1928 return -EOPNOTSUPP;
1929 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1930 return -EINVAL;
1931 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1932 return -EINVAL;
1933 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1934 return -EINVAL;
1935
1936 if (cmd->autoneg == AUTONEG_DISABLE) {
1937 switch (cmd->speed) {
1938 case SPEED_10:
1939 if (cmd->duplex == DUPLEX_HALF
1940 && !(f & SUPPORTED_10baseT_Half))
1941 return -EINVAL;
1942 if (cmd->duplex == DUPLEX_FULL
1943 && !(f & SUPPORTED_10baseT_Full))
1944 return -EINVAL;
1945 break;
1946 case SPEED_100:
1947 if (cmd->duplex == DUPLEX_HALF
1948 && !(f & SUPPORTED_100baseT_Half))
1949 return -EINVAL;
1950 if (cmd->duplex == DUPLEX_FULL
1951 && !(f & SUPPORTED_100baseT_Full))
1952 return -EINVAL;
1953 break;
1954 case SPEED_1000:
1955 if (cmd->duplex == DUPLEX_HALF
1956 && !(f & SUPPORTED_1000baseT_Half))
1957 return -EINVAL;
1958 if (cmd->duplex == DUPLEX_FULL
1959 && !(f & SUPPORTED_1000baseT_Full))
1960 return -EINVAL;
1961 break;
1962 default:
1963 return -EINVAL;
1964 }
1965
1966 mutex_lock(&dev->link_lock);
1967 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1968 cmd->duplex);
1969 mutex_unlock(&dev->link_lock);
1970
1971 } else {
1972 if (!(f & SUPPORTED_Autoneg))
1973 return -EINVAL;
1974
1975 mutex_lock(&dev->link_lock);
1976 dev->phy.def->ops->setup_aneg(&dev->phy,
1977 (cmd->advertising & f) |
1978 (dev->phy.advertising &
1979 (ADVERTISED_Pause |
1980 ADVERTISED_Asym_Pause)));
1981 mutex_unlock(&dev->link_lock);
1982 }
1983 emac_force_link_update(dev);
1984
1985 return 0;
1986 }
1987
1988 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1989 struct ethtool_ringparam *rp)
1990 {
1991 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1992 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1993 }
1994
1995 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1996 struct ethtool_pauseparam *pp)
1997 {
1998 struct emac_instance *dev = netdev_priv(ndev);
1999
2000 mutex_lock(&dev->link_lock);
2001 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2002 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2003 pp->autoneg = 1;
2004
2005 if (dev->phy.duplex == DUPLEX_FULL) {
2006 if (dev->phy.pause)
2007 pp->rx_pause = pp->tx_pause = 1;
2008 else if (dev->phy.asym_pause)
2009 pp->tx_pause = 1;
2010 }
2011 mutex_unlock(&dev->link_lock);
2012 }
2013
2014 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2015 {
2016 struct emac_instance *dev = netdev_priv(ndev);
2017
2018 return dev->tah_dev != NULL;
2019 }
2020
2021 static int emac_get_regs_len(struct emac_instance *dev)
2022 {
2023 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2024 return sizeof(struct emac_ethtool_regs_subhdr) +
2025 EMAC4_ETHTOOL_REGS_SIZE(dev);
2026 else
2027 return sizeof(struct emac_ethtool_regs_subhdr) +
2028 EMAC_ETHTOOL_REGS_SIZE(dev);
2029 }
2030
2031 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2032 {
2033 struct emac_instance *dev = netdev_priv(ndev);
2034 int size;
2035
2036 size = sizeof(struct emac_ethtool_regs_hdr) +
2037 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2038 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2039 size += zmii_get_regs_len(dev->zmii_dev);
2040 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2041 size += rgmii_get_regs_len(dev->rgmii_dev);
2042 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2043 size += tah_get_regs_len(dev->tah_dev);
2044
2045 return size;
2046 }
2047
2048 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2049 {
2050 struct emac_ethtool_regs_subhdr *hdr = buf;
2051
2052 hdr->index = dev->cell_index;
2053 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2054 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2055 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2056 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2057 } else {
2058 hdr->version = EMAC_ETHTOOL_REGS_VER;
2059 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2060 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2061 }
2062 }
2063
2064 static void emac_ethtool_get_regs(struct net_device *ndev,
2065 struct ethtool_regs *regs, void *buf)
2066 {
2067 struct emac_instance *dev = netdev_priv(ndev);
2068 struct emac_ethtool_regs_hdr *hdr = buf;
2069
2070 hdr->components = 0;
2071 buf = hdr + 1;
2072
2073 buf = mal_dump_regs(dev->mal, buf);
2074 buf = emac_dump_regs(dev, buf);
2075 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2076 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2077 buf = zmii_dump_regs(dev->zmii_dev, buf);
2078 }
2079 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2080 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2081 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2082 }
2083 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2084 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2085 buf = tah_dump_regs(dev->tah_dev, buf);
2086 }
2087 }
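/* Editorial note, a sketch of the resulting dump layout (derived purely from
 * the code above, not from separate documentation): one emac_ethtool_regs_hdr,
 * the MAL register image, then a subhdr plus raw register image per component
 * flagged in hdr->components:
 *
 *	[hdr][MAL regs][subhdr|EMAC regs][ZMII regs?][RGMII regs?][TAH regs?]
 */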
2088
2089 static int emac_ethtool_nway_reset(struct net_device *ndev)
2090 {
2091 struct emac_instance *dev = netdev_priv(ndev);
2092 int res = 0;
2093
2094 DBG(dev, "nway_reset" NL);
2095
2096 if (dev->phy.address < 0)
2097 return -EOPNOTSUPP;
2098
2099 mutex_lock(&dev->link_lock);
2100 if (!dev->phy.autoneg) {
2101 res = -EINVAL;
2102 goto out;
2103 }
2104
2105 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2106 out:
2107 mutex_unlock(&dev->link_lock);
2108 emac_force_link_update(dev);
2109 return res;
2110 }
2111
2112 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2113 {
2114 return EMAC_ETHTOOL_STATS_COUNT;
2115 }
2116
2117 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2118 u8 * buf)
2119 {
2120 if (stringset == ETH_SS_STATS)
2121 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2122 }
2123
2124 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2125 struct ethtool_stats *estats,
2126 u64 * tmp_stats)
2127 {
2128 struct emac_instance *dev = netdev_priv(ndev);
2129
2130 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2131 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2132 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2133 }
2134
2135 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2136 struct ethtool_drvinfo *info)
2137 {
2138 struct emac_instance *dev = netdev_priv(ndev);
2139
2140 strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2141 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2142 info->fw_version[0] = '\0';
2143 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2144 dev->cell_index, dev->ofdev->node->full_name);
2145 info->n_stats = emac_ethtool_get_stats_count(ndev);
2146 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2147 }
2148
2149 static const struct ethtool_ops emac_ethtool_ops = {
2150 .get_settings = emac_ethtool_get_settings,
2151 .set_settings = emac_ethtool_set_settings,
2152 .get_drvinfo = emac_ethtool_get_drvinfo,
2153
2154 .get_regs_len = emac_ethtool_get_regs_len,
2155 .get_regs = emac_ethtool_get_regs,
2156
2157 .nway_reset = emac_ethtool_nway_reset,
2158
2159 .get_ringparam = emac_ethtool_get_ringparam,
2160 .get_pauseparam = emac_ethtool_get_pauseparam,
2161
2162 .get_rx_csum = emac_ethtool_get_rx_csum,
2163
2164 .get_strings = emac_ethtool_get_strings,
2165 .get_stats_count = emac_ethtool_get_stats_count,
2166 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2167
2168 .get_link = ethtool_op_get_link,
2169 .get_tx_csum = ethtool_op_get_tx_csum,
2170 .get_sg = ethtool_op_get_sg,
2171 };
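/* Editorial usage sketch (not part of the driver): pulling the u64 stats
 * exposed by emac_ethtool_get_ethtool_stats() through the standard
 * SIOCETHTOOL interface from userspace. n_stats comes from ETHTOOL_GDRVINFO,
 * matching emac_ethtool_get_drvinfo(); the interface name and the plain
 * malloc() error handling are placeholders.
 *
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <sys/ioctl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static struct ethtool_stats *emac_get_stats_blob(int sock,
 *							 const char *ifname)
 *	{
 *		struct ifreq ifr;
 *		struct ethtool_drvinfo info;
 *		struct ethtool_stats *stats;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *
 *		memset(&info, 0, sizeof(info));
 *		info.cmd = ETHTOOL_GDRVINFO;
 *		ifr.ifr_data = (char *)&info;
 *		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
 *			return NULL;
 *
 *		stats = malloc(sizeof(*stats) + info.n_stats * sizeof(__u64));
 *		if (stats == NULL)
 *			return NULL;
 *		stats->cmd = ETHTOOL_GSTATS;
 *		stats->n_stats = info.n_stats;
 *		ifr.ifr_data = (char *)stats;
 *		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0) {
 *			free(stats);
 *			return NULL;
 *		}
 *		return stats;	// caller frees; values live in stats->data[]
 *	}
 */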
2172
2173 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2174 {
2175 struct emac_instance *dev = netdev_priv(ndev);
2176 uint16_t *data = (uint16_t *)&rq->ifr_ifru;
2177
2178 DBG(dev, "ioctl %08x" NL, cmd);
2179
2180 if (dev->phy.address < 0)
2181 return -EOPNOTSUPP;
2182
2183 switch (cmd) {
2184 case SIOCGMIIPHY:
2185 case SIOCDEVPRIVATE:
2186 data[0] = dev->phy.address;
2187 /* Fall through */
2188 case SIOCGMIIREG:
2189 case SIOCDEVPRIVATE + 1:
2190 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2191 return 0;
2192
2193 case SIOCSMIIREG:
2194 case SIOCDEVPRIVATE + 2:
2195 if (!capable(CAP_NET_ADMIN))
2196 return -EPERM;
2197 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2198 return 0;
2199 default:
2200 return -EOPNOTSUPP;
2201 }
2202 }
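/* Editorial usage sketch (not part of the driver): reading a PHY register
 * through the SIOCGMIIREG path above. The uint16_t overlay used by
 * emac_ioctl() matches the standard struct mii_ioctl_data layout
 * (phy_id, reg_num, val_in, val_out); the socket and interface name are
 * placeholders supplied by the caller.
 *
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *
 *	static int read_phy_reg(int sock, const char *ifname, int reg)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	// fills mii->phy_id
 *			return -1;
 *		mii->reg_num = reg;
 *		if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
 *			return -1;
 *		return mii->val_out;
 *	}
 */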
2203
2204 struct emac_depentry {
2205 u32 phandle;
2206 struct device_node *node;
2207 struct of_device *ofdev;
2208 void *drvdata;
2209 };
2210
2211 #define EMAC_DEP_MAL_IDX 0
2212 #define EMAC_DEP_ZMII_IDX 1
2213 #define EMAC_DEP_RGMII_IDX 2
2214 #define EMAC_DEP_TAH_IDX 3
2215 #define EMAC_DEP_MDIO_IDX 4
2216 #define EMAC_DEP_PREV_IDX 5
2217 #define EMAC_DEP_COUNT 6
2218
2219 static int __devinit emac_check_deps(struct emac_instance *dev,
2220 struct emac_depentry *deps)
2221 {
2222 int i, there = 0;
2223 struct device_node *np;
2224
2225 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2226 /* no dependency on that item, all right */
2227 if (deps[i].phandle == 0) {
2228 there++;
2229 continue;
2230 }
2231 /* special case for blist as the dependency might go away */
2232 if (i == EMAC_DEP_PREV_IDX) {
2233 np = *(dev->blist - 1);
2234 if (np == NULL) {
2235 deps[i].phandle = 0;
2236 there++;
2237 continue;
2238 }
2239 if (deps[i].node == NULL)
2240 deps[i].node = of_node_get(np);
2241 }
2242 if (deps[i].node == NULL)
2243 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2244 if (deps[i].node == NULL)
2245 continue;
2246 if (deps[i].ofdev == NULL)
2247 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2248 if (deps[i].ofdev == NULL)
2249 continue;
2250 if (deps[i].drvdata == NULL)
2251 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2252 if (deps[i].drvdata != NULL)
2253 there++;
2254 }
2255 return (there == EMAC_DEP_COUNT);
2256 }
2257
2258 static void emac_put_deps(struct emac_instance *dev)
2259 {
2260 if (dev->mal_dev)
2261 of_dev_put(dev->mal_dev);
2262 if (dev->zmii_dev)
2263 of_dev_put(dev->zmii_dev);
2264 if (dev->rgmii_dev)
2265 of_dev_put(dev->rgmii_dev);
2266 if (dev->mdio_dev)
2267 of_dev_put(dev->mdio_dev);
2268 if (dev->tah_dev)
2269 of_dev_put(dev->tah_dev);
2270 }
2271
2272 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2273 unsigned long action, void *data)
2274 {
2275 /* We are only interested in devices being bound to a driver */
2276 if (action == BUS_NOTIFY_BOUND_DRIVER)
2277 wake_up_all(&emac_probe_wait);
2278 return 0;
2279 }
2280
2281 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2282 .notifier_call = emac_of_bus_notify
2283 };
2284
2285 static int __devinit emac_wait_deps(struct emac_instance *dev)
2286 {
2287 struct emac_depentry deps[EMAC_DEP_COUNT];
2288 int i, err;
2289
2290 memset(&deps, 0, sizeof(deps));
2291
2292 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2293 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2294 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2295 if (dev->tah_ph)
2296 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2297 if (dev->mdio_ph)
2298 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2299 if (dev->blist && dev->blist > emac_boot_list)
2300 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2301 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2302 wait_event_timeout(emac_probe_wait,
2303 emac_check_deps(dev, deps),
2304 EMAC_PROBE_DEP_TIMEOUT);
2305 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2306 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2307 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2308 if (deps[i].node)
2309 of_node_put(deps[i].node);
2310 if (err && deps[i].ofdev)
2311 of_dev_put(deps[i].ofdev);
2312 }
2313 if (err == 0) {
2314 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2315 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2316 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2317 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2318 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2319 }
2320 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2321 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2322 return err;
2323 }
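/* Editorial note on the pattern above: emac_wait_deps() registers a bus
 * notifier so that every driver bound on the of_platform bus wakes
 * emac_probe_wait, then wait_event_timeout() re-evaluates emac_check_deps()
 * until every phandle dependency (MAL, ZMII, RGMII, TAH, MDIO and the
 * previous EMAC in the boot list) has probed, or EMAC_PROBE_DEP_TIMEOUT
 * expires and the probe fails with -ENODEV.
 */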
2324
2325 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2326 u32 *val, int fatal)
2327 {
2328 int len;
2329 const u32 *prop = of_get_property(np, name, &len);
2330 if (prop == NULL || len < sizeof(u32)) {
2331 if (fatal)
2332 printk(KERN_ERR "%s: missing %s property\n",
2333 np->full_name, name);
2334 return -ENODEV;
2335 }
2336 *val = *prop;
2337 return 0;
2338 }
2339
2340 static int __devinit emac_init_phy(struct emac_instance *dev)
2341 {
2342 struct device_node *np = dev->ofdev->node;
2343 struct net_device *ndev = dev->ndev;
2344 u32 phy_map, adv;
2345 int i;
2346
2347 dev->phy.dev = ndev;
2348 dev->phy.mode = dev->phy_mode;
2349
2350 /* PHY-less configuration.
2351 * XXX I probably should move these settings to the dev tree
2352 */
2353 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2354 emac_reset(dev);
2355
2359 dev->phy.address = -1;
2360 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2361 dev->phy.pause = 1;
2362
2363 return 0;
2364 }
2365
2366 mutex_lock(&emac_phy_map_lock);
2367 phy_map = dev->phy_map | busy_phy_map;
2368
2369 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2370
2371 dev->phy.mdio_read = emac_mdio_read;
2372 dev->phy.mdio_write = emac_mdio_write;
2373
2374 /* Enable internal clock source */
2375 #ifdef CONFIG_PPC_DCR_NATIVE
2376 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2377 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2378 #endif
2379 /* PHY clock workaround */
2380 emac_rx_clk_tx(dev);
2381
2387 /* Configure EMAC with defaults so we can at least use MDIO
2388 * This is needed mostly for 440GX
2389 */
2390 if (emac_phy_gpcs(dev->phy.mode)) {
2391 /* XXX
2392 * Make GPCS PHY address equal to EMAC index.
2393 * We probably should take into account busy_phy_map
2394 * and/or phy_map here.
2395 *
2396 * Note that the busy_phy_map is currently global
2397 * while it should probably be per-ASIC...
2398 */
2399 dev->phy.address = dev->cell_index;
2400 }
2401
2402 emac_configure(dev);
2403
2404 if (dev->phy_address != 0xffffffff)
2405 phy_map = ~(1 << dev->phy_address);
2406
2407 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2408 if (!(phy_map & 1)) {
2409 int r;
2410 busy_phy_map |= 1 << i;
2411
2412 /* Quick check if there is a PHY at the address */
2413 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2414 if (r == 0xffff || r < 0)
2415 continue;
2416 if (!emac_mii_phy_probe(&dev->phy, i))
2417 break;
2418 }
2419
2420 /* Enable external clock source */
2421 #ifdef CONFIG_PPC_DCR_NATIVE
2422 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2423 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2424 #endif
2425 mutex_unlock(&emac_phy_map_lock);
2426 if (i == 0x20) {
2427 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2428 return -ENXIO;
2429 }
2430
2431 /* Init PHY */
2432 if (dev->phy.def->ops->init)
2433 dev->phy.def->ops->init(&dev->phy);
2434
2435 /* Disable any PHY features not supported by the platform */
2436 dev->phy.def->features &= ~dev->phy_feat_exc;
2437
2438 /* Setup initial link parameters */
2439 if (dev->phy.features & SUPPORTED_Autoneg) {
2440 adv = dev->phy.features;
2441 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2442 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2443 /* Restart autonegotiation */
2444 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2445 } else {
2446 u32 f = dev->phy.def->features;
2447 int speed = SPEED_10, fd = DUPLEX_HALF;
2448
2449 /* Select highest supported speed/duplex */
2450 if (f & SUPPORTED_1000baseT_Full) {
2451 speed = SPEED_1000;
2452 fd = DUPLEX_FULL;
2453 } else if (f & SUPPORTED_1000baseT_Half)
2454 speed = SPEED_1000;
2455 else if (f & SUPPORTED_100baseT_Full) {
2456 speed = SPEED_100;
2457 fd = DUPLEX_FULL;
2458 } else if (f & SUPPORTED_100baseT_Half)
2459 speed = SPEED_100;
2460 else if (f & SUPPORTED_10baseT_Full)
2461 fd = DUPLEX_FULL;
2462
2463 /* Force link parameters */
2464 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2465 }
2466 return 0;
2467 }
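/* Editorial worked example of the map logic above (illustrative values):
 * a set bit in phy_map means "skip this MDIO address". With phy-map and
 * busy_phy_map both 0, all 32 addresses are scanned; if the device tree
 * sets phy-address = <5> instead, phy_map becomes ~(1 << 5) = 0xffffffdf
 * and only address 5 is probed for a responding BMCR.
 */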
2468
2469 static int __devinit emac_init_config(struct emac_instance *dev)
2470 {
2471 struct device_node *np = dev->ofdev->node;
2472 const void *p;
2473 unsigned int plen;
2474 const char *pm, *phy_modes[] = {
2475 [PHY_MODE_NA] = "",
2476 [PHY_MODE_MII] = "mii",
2477 [PHY_MODE_RMII] = "rmii",
2478 [PHY_MODE_SMII] = "smii",
2479 [PHY_MODE_RGMII] = "rgmii",
2480 [PHY_MODE_TBI] = "tbi",
2481 [PHY_MODE_GMII] = "gmii",
2482 [PHY_MODE_RTBI] = "rtbi",
2483 [PHY_MODE_SGMII] = "sgmii",
2484 };
2485
2486 /* Read config from device-tree */
2487 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2488 return -ENXIO;
2489 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2490 return -ENXIO;
2491 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2492 return -ENXIO;
2493 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2494 return -ENXIO;
2495 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2496 dev->max_mtu = 1500;
2497 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2498 dev->rx_fifo_size = 2048;
2499 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2500 dev->tx_fifo_size = 2048;
2501 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2502 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2503 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2504 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2505 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2506 dev->phy_address = 0xffffffff;
2507 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2508 dev->phy_map = 0xffffffff;
2509 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2510 return -ENXIO;
2511 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2512 dev->tah_ph = 0;
2513 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2514 dev->tah_port = 0;
2515 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2516 dev->mdio_ph = 0;
2517 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2518 dev->zmii_ph = 0;
2519 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2520 dev->zmii_port = 0xffffffff;
2521 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2522 dev->rgmii_ph = 0;
2523 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2524 dev->rgmii_port = 0xffffffff;
2525 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2526 dev->fifo_entry_size = 16;
2527 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2528 dev->mal_burst_size = 256;
2529
2530 /* PHY mode needs some decoding */
2531 dev->phy_mode = PHY_MODE_NA;
2532 pm = of_get_property(np, "phy-mode", &plen);
2533 if (pm != NULL) {
2534 int i;
2535 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2536 if (!strcasecmp(pm, phy_modes[i])) {
2537 dev->phy_mode = i;
2538 break;
2539 }
2540 }
2541
2542 /* Backward compat with non-final DT */
2543 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2544 u32 nmode = *(const u32 *)pm;
2545 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2546 dev->phy_mode = nmode;
2547 }
2548
2549 /* Check EMAC version */
2550 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2551 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2552 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2553 dev->features |= EMAC_FTR_EMAC4;
2554 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2555 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2556 } else {
2557 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2558 of_device_is_compatible(np, "ibm,emac-440gr"))
2559 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2560 }
2561
2562 /* Fixup some feature bits based on the device tree */
2563 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2564 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2565 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2566 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2567
2568 /* CAB lacks the appropriate properties */
2569 if (of_device_is_compatible(np, "ibm,emac-axon"))
2570 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2571 EMAC_FTR_STACR_OC_INVERT;
2572
2573 /* Enable TAH/ZMII/RGMII features as found */
2574 if (dev->tah_ph != 0) {
2575 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2576 dev->features |= EMAC_FTR_HAS_TAH;
2577 #else
2578 printk(KERN_ERR "%s: TAH support not enabled !\n",
2579 np->full_name);
2580 return -ENXIO;
2581 #endif
2582 }
2583
2584 if (dev->zmii_ph != 0) {
2585 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2586 dev->features |= EMAC_FTR_HAS_ZMII;
2587 #else
2588 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2589 np->full_name);
2590 return -ENXIO;
2591 #endif
2592 }
2593
2594 if (dev->rgmii_ph != 0) {
2595 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2596 dev->features |= EMAC_FTR_HAS_RGMII;
2597 #else
2598 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2599 np->full_name);
2600 return -ENXIO;
2601 #endif
2602 }
2603
2604 /* Read MAC-address */
2605 p = of_get_property(np, "local-mac-address", NULL);
2606 if (p == NULL) {
2607 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2608 np->full_name);
2609 return -ENXIO;
2610 }
2611 memcpy(dev->ndev->dev_addr, p, 6);
2612
2613 /* IAHT and GAHT filter parameterization */
2614 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2615 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2616 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2617 } else {
2618 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2619 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2620 }
2621
2622 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2623 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2624 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2625 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2626 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2627
2628 return 0;
2629 }
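/* Editorial illustration: a device-tree fragment matching the properties
 * parsed above. The node name, unit address, phandles and the MAC address
 * are placeholders, not taken from any particular board file.
 *
 *	EMAC0: ethernet@ef600e00 {
 *		device_type = "network";
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <2048>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0x0>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */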
2630
2631 static int __devinit emac_probe(struct of_device *ofdev,
2632 const struct of_device_id *match)
2633 {
2634 struct net_device *ndev;
2635 struct emac_instance *dev;
2636 struct device_node *np = ofdev->node;
2637 struct device_node **blist = NULL;
2638 int err, i;
2639
2640 /* Skip unused/unwired EMACs. We leave the check for an unused
2641 * property here for now, but new flat device trees should set a
2642 * status property to "disabled" instead.
2643 */
2644 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2645 return -ENODEV;
2646
2647 /* Find ourselves in the bootlist if we are there */
2648 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2649 if (emac_boot_list[i] == np)
2650 blist = &emac_boot_list[i];
2651
2652 /* Allocate our net_device structure */
2653 err = -ENOMEM;
2654 ndev = alloc_etherdev(sizeof(struct emac_instance));
2655 if (!ndev) {
2656 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2657 np->full_name);
2658 goto err_gone;
2659 }
2660 dev = netdev_priv(ndev);
2661 dev->ndev = ndev;
2662 dev->ofdev = ofdev;
2663 dev->blist = blist;
2664 SET_NETDEV_DEV(ndev, &ofdev->dev);
2665
2666 /* Initialize some embedded data structures */
2667 mutex_init(&dev->mdio_lock);
2668 mutex_init(&dev->link_lock);
2669 spin_lock_init(&dev->lock);
2670 INIT_WORK(&dev->reset_work, emac_reset_work);
2671
2672 /* Init various config data based on device-tree */
2673 err = emac_init_config(dev);
2674 if (err != 0)
2675 goto err_free;
2676
2677 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2678 dev->emac_irq = irq_of_parse_and_map(np, 0);
2679 dev->wol_irq = irq_of_parse_and_map(np, 1);
2680 if (dev->emac_irq == NO_IRQ) {
2681 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
     err = -ENODEV;
2682 goto err_free;
2683 }
2684 ndev->irq = dev->emac_irq;
2685
2686 /* Map EMAC regs */
2687 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2688 printk(KERN_ERR "%s: Can't get registers address\n",
2689 np->full_name);
     err = -ENXIO;
2690 goto err_irq_unmap;
2691 }
2692 /* TODO: request_mem_region */
2693 dev->emacp = ioremap(dev->rsrc_regs.start,
2694 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2695 if (dev->emacp == NULL) {
2696 printk(KERN_ERR "%s: Can't map device registers!\n",
2697 np->full_name);
2698 err = -ENOMEM;
2699 goto err_irq_unmap;
2700 }
2701
2702 /* Wait for dependent devices */
2703 err = emac_wait_deps(dev);
2704 if (err) {
2705 printk(KERN_ERR
2706 "%s: Timeout waiting for dependent devices\n",
2707 np->full_name);
2708 /* display more info about what's missing ? */
2709 goto err_reg_unmap;
2710 }
2711 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2712 if (dev->mdio_dev != NULL)
2713 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2714
2715 /* Register with MAL */
2716 dev->commac.ops = &emac_commac_ops;
2717 dev->commac.dev = dev;
2718 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2719 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2720 err = mal_register_commac(dev->mal, &dev->commac);
2721 if (err) {
2722 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2723 np->full_name, dev->mal_dev->node->full_name);
2724 goto err_rel_deps;
2725 }
2726 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2727 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2728
2729 /* Get pointers to BD rings */
2730 dev->tx_desc =
2731 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2732 dev->rx_desc =
2733 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2734
2735 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2736 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2737
2738 /* Clean rings */
2739 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2740 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2741
2742 /* Attach to ZMII, if needed */
2743 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2744 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2745 goto err_unreg_commac;
2746
2747 /* Attach to RGMII, if needed */
2748 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2749 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2750 goto err_detach_zmii;
2751
2752 /* Attach to TAH, if needed */
2753 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2754 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2755 goto err_detach_rgmii;
2756
2757 /* Set some link defaults before we can find out real parameters */
2758 dev->phy.speed = SPEED_100;
2759 dev->phy.duplex = DUPLEX_FULL;
2760 dev->phy.autoneg = AUTONEG_DISABLE;
2761 dev->phy.pause = dev->phy.asym_pause = 0;
2762 dev->stop_timeout = STOP_TIMEOUT_100;
2763 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2764
2765 /* Find PHY if any */
2766 err = emac_init_phy(dev);
2767 if (err != 0)
2768 goto err_detach_tah;
2769
2770 /* Fill in the driver function table */
2771 ndev->open = &emac_open;
2772 if (dev->tah_dev)
2773 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2774 ndev->tx_timeout = &emac_tx_timeout;
2775 ndev->watchdog_timeo = 5 * HZ;
2776 ndev->stop = &emac_close;
2777 ndev->get_stats = &emac_stats;
2778 ndev->set_multicast_list = &emac_set_multicast_list;
2779 ndev->do_ioctl = &emac_ioctl;
2780 if (emac_phy_supports_gige(dev->phy_mode)) {
2781 ndev->hard_start_xmit = &emac_start_xmit_sg;
2782 ndev->change_mtu = &emac_change_mtu;
2783 dev->commac.ops = &emac_commac_sg_ops;
2784 } else {
2785 ndev->hard_start_xmit = &emac_start_xmit;
2786 }
2787 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2788
2789 netif_carrier_off(ndev);
2790 netif_stop_queue(ndev);
2791
2792 err = register_netdev(ndev);
2793 if (err) {
2794 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2795 np->full_name, err);
2796 goto err_detach_tah;
2797 }
2798
2799 /* Set our drvdata last as we don't want it visible until we are
2800 * fully initialized
2801 */
2802 wmb();
2803 dev_set_drvdata(&ofdev->dev, dev);
2804
2805 /* There's a new kid in town ! Let's tell everybody */
2806 wake_up_all(&emac_probe_wait);
2807
2808
2809 printk(KERN_INFO
2810 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2811 ndev->name, dev->cell_index, np->full_name,
2812 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2813 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2814
2815 if (dev->phy.address >= 0)
2816 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2817 dev->phy.def->name, dev->phy.address);
2818
2819 emac_dbg_register(dev);
2820
2821 /* Life is good */
2822 return 0;
2823
2824 /* I have a bad feeling about this ... */
2825
2826 err_detach_tah:
2827 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2828 tah_detach(dev->tah_dev, dev->tah_port);
2829 err_detach_rgmii:
2830 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2831 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2832 err_detach_zmii:
2833 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2834 zmii_detach(dev->zmii_dev, dev->zmii_port);
2835 err_unreg_commac:
2836 mal_unregister_commac(dev->mal, &dev->commac);
2837 err_rel_deps:
2838 emac_put_deps(dev);
2839 err_reg_unmap:
2840 iounmap(dev->emacp);
2841 err_irq_unmap:
2842 if (dev->wol_irq != NO_IRQ)
2843 irq_dispose_mapping(dev->wol_irq);
2844 if (dev->emac_irq != NO_IRQ)
2845 irq_dispose_mapping(dev->emac_irq);
2846 err_free:
2847 free_netdev(ndev); /* pairs with alloc_etherdev() */
2848 err_gone:
2849 /* if we were on the bootlist, remove us as we won't show up and
2850 * wake up all waiters to notify them in case they were waiting
2851 * on us
2852 */
2853 if (blist) {
2854 *blist = NULL;
2855 wake_up_all(&emac_probe_wait);
2856 }
2857 return err;
2858 }
2859
2860 static int __devexit emac_remove(struct of_device *ofdev)
2861 {
2862 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2863
2864 DBG(dev, "remove" NL);
2865
2866 dev_set_drvdata(&ofdev->dev, NULL);
2867
2868 unregister_netdev(dev->ndev);
2869
2870 flush_scheduled_work();
2871
2872 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2873 tah_detach(dev->tah_dev, dev->tah_port);
2874 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2875 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2876 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2877 zmii_detach(dev->zmii_dev, dev->zmii_port);
2878
2879 mal_unregister_commac(dev->mal, &dev->commac);
2880 emac_put_deps(dev);
2881
2882 emac_dbg_unregister(dev);
2883 iounmap(dev->emacp);
2884
2885 if (dev->wol_irq != NO_IRQ)
2886 irq_dispose_mapping(dev->wol_irq);
2887 if (dev->emac_irq != NO_IRQ)
2888 irq_dispose_mapping(dev->emac_irq);
2889
2890 free_netdev(dev->ndev); /* pairs with alloc_etherdev() */
2891
2892 return 0;
2893 }
2894
2895 /* XXX Features in here should be replaced by properties... */
2896 static struct of_device_id emac_match[] =
2897 {
2898 {
2899 .type = "network",
2900 .compatible = "ibm,emac",
2901 },
2902 {
2903 .type = "network",
2904 .compatible = "ibm,emac4",
2905 },
2906 {
2907 .type = "network",
2908 .compatible = "ibm,emac4sync",
2909 },
2910 {},
2911 };
2912
2913 static struct of_platform_driver emac_driver = {
2914 .name = "emac",
2915 .match_table = emac_match,
2916
2917 .probe = emac_probe,
2918 .remove = emac_remove,
2919 };
2920
2921 static void __init emac_make_bootlist(void)
2922 {
2923 struct device_node *np = NULL;
2924 int j, max, i = 0, k;
2925 int cell_indices[EMAC_BOOT_LIST_SIZE];
2926
2927 /* Collect EMACs */
2928 while((np = of_find_all_nodes(np)) != NULL) {
2929 const u32 *idx;
2930
2931 if (of_match_node(emac_match, np) == NULL)
2932 continue;
2933 if (of_get_property(np, "unused", NULL))
2934 continue;
2935 idx = of_get_property(np, "cell-index", NULL);
2936 if (idx == NULL)
2937 continue;
2938 cell_indices[i] = *idx;
2939 emac_boot_list[i++] = of_node_get(np);
2940 if (i >= EMAC_BOOT_LIST_SIZE) {
2941 of_node_put(np);
2942 break;
2943 }
2944 }
2945 max = i;
2946
2947 /* Bubble sort them (doh, what a creative algorithm :-) */
2948 for (i = 0; max > 1 && (i < (max - 1)); i++)
2949 for (j = i; j < max; j++) {
2950 if (cell_indices[i] > cell_indices[j]) {
2951 np = emac_boot_list[i];
2952 emac_boot_list[i] = emac_boot_list[j];
2953 emac_boot_list[j] = np;
2954 k = cell_indices[i];
2955 cell_indices[i] = cell_indices[j];
2956 cell_indices[j] = k;
2957 }
2958 }
2959 }
2960
2961 static int __init emac_init(void)
2962 {
2963 int rc;
2964
2965 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2966
2967 /* Init debug stuff */
2968 emac_init_debug();
2969
2970 /* Build EMAC boot list */
2971 emac_make_bootlist();
2972
2973 /* Init submodules */
2974 rc = mal_init();
2975 if (rc)
2976 goto err;
2977 rc = zmii_init();
2978 if (rc)
2979 goto err_mal;
2980 rc = rgmii_init();
2981 if (rc)
2982 goto err_zmii;
2983 rc = tah_init();
2984 if (rc)
2985 goto err_rgmii;
2986 rc = of_register_platform_driver(&emac_driver);
2987 if (rc)
2988 goto err_tah;
2989
2990 return 0;
2991
2992 err_tah:
2993 tah_exit();
2994 err_rgmii:
2995 rgmii_exit();
2996 err_zmii:
2997 zmii_exit();
2998 err_mal:
2999 mal_exit();
3000 err:
3001 return rc;
3002 }
3003
3004 static void __exit emac_exit(void)
3005 {
3006 int i;
3007
3008 of_unregister_platform_driver(&emac_driver);
3009
3010 tah_exit();
3011 rgmii_exit();
3012 zmii_exit();
3013 mal_exit();
3014 emac_fini_debug();
3015
3016 /* Destroy EMAC boot list */
3017 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3018 if (emac_boot_list[i])
3019 of_node_put(emac_boot_list[i]);
3020 }
3021
3022 module_init(emac_init);
3023 module_exit(emac_exit);