ibm_newemac: Fix ZMII refcounting bug
[deliverable/linux.git] / drivers/net/ibm_newemac/core.c
1/*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12 * Armin Kuster <akuster@mvista.com>
13 * Johnnie Peters <jpeters@mvista.com>
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 */
21
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/delay.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/crc32.h>
31#include <linux/ethtool.h>
32#include <linux/mii.h>
33#include <linux/bitops.h>
34#include <linux/workqueue.h>
35
36#include <asm/processor.h>
37#include <asm/io.h>
38#include <asm/dma.h>
39#include <asm/uaccess.h>
40
41#include "core.h"
42
43/*
44 * Lack of dma_unmap_???? calls is intentional.
45 *
46 * API-correct usage requires additional support state information to be
47 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
48 * EMAC design (e.g. TX buffer passed from network stack can be split into
49 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
50 * maintaining such information will add additional overhead.
51 * Current DMA API implementation for 4xx processors only ensures cache coherency
52 * and dma_unmap_???? routines are empty and are likely to stay this way.
53 * I decided to omit dma_unmap_??? calls because I don't want to add additional
54 * complexity just for the sake of following some abstract API, when it doesn't
 55 * add any real benefit to the driver. I understand that this decision may be
56 * controversial, but I really tried to make code API-correct and efficient
57 * at the same time and didn't come up with code I liked :(. --ebs
58 */
59
60#define DRV_NAME "emac"
61#define DRV_VERSION "3.54"
62#define DRV_DESC "PPC 4xx OCP EMAC driver"
63
64MODULE_DESCRIPTION(DRV_DESC);
65MODULE_AUTHOR
66 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
67MODULE_LICENSE("GPL");
68
69/*
70 * PPC64 doesn't (yet) have a cacheable_memcpy
71 */
72#ifdef CONFIG_PPC64
73#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
74#endif
75
76/* minimum number of free TX descriptors required to wake up TX process */
77#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
78
 79/* If the packet size is less than this number, we allocate a small skb and copy the
 80 * packet contents into it instead of just sending the original big skb up
81 */
82#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
83
84/* Since multiple EMACs share MDIO lines in various ways, we need
85 * to avoid re-using the same PHY ID in cases where the arch didn't
86 * setup precise phy_map entries
87 *
88 * XXX This is something that needs to be reworked as we can have multiple
89 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
90 * probably require in that case to have explicit PHY IDs in the device-tree
91 */
92static u32 busy_phy_map;
93static DEFINE_MUTEX(emac_phy_map_lock);
94
95/* This is the wait queue used to wait on any event related to probe, that
96 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
97 */
98static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
99
100/* Having stable interface names is a doomed idea. However, it would be nice
101 * if we didn't have completely random interface names at boot too :-) It's
102 * just a matter of making everybody's life easier. Since we are doing
103 * threaded probing, it's a bit harder though. The base idea here is that
104 * we make up a list of all emacs in the device-tree before we register the
105 * driver. Every emac will then wait for the previous one in the list to
106 * initialize before itself. We should also keep that list ordered by
107 * cell_index.
108 * That list is only 4 entries long, meaning that additional EMACs don't
109 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
110 */
111
112#define EMAC_BOOT_LIST_SIZE 4
113static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
114
115/* How long should I wait for dependent devices ? */
116#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
117
 118/* I don't want to litter the system log with timeout errors
 119 * when we have a brain-damaged PHY.
120 */
121static inline void emac_report_timeout_error(struct emac_instance *dev,
122 const char *error)
123{
124 if (net_ratelimit())
125 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
126}
127
128/* PHY polling intervals */
129#define PHY_POLL_LINK_ON HZ
130#define PHY_POLL_LINK_OFF (HZ / 5)
131
132/* Graceful stop timeouts in us.
133 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
134 */
135#define STOP_TIMEOUT_10 1230
136#define STOP_TIMEOUT_100 124
137#define STOP_TIMEOUT_1000 13
138#define STOP_TIMEOUT_1000_JUMBO 73
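/* For reference, these match about one maximum-length frame time: a
 * 1518-byte frame plus preamble and inter-frame gap is ~12300 bits,
 * i.e. ~1230us at 10Mbit/s and ~123us at 100Mbit/s; the jumbo value
 * assumes a ~9000-byte frame at 1Gbit/s (~72us).
 */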
139
140/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
141static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
142 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
143 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
144 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
145 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
146 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
147 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
148 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
149 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
150 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
151 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
152 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
153 "tx_bd_excessive_collisions", "tx_bd_late_collision",
154 "tx_bd_multple_collisions", "tx_bd_single_collision",
155 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
156 "tx_errors"
157};
158
159static irqreturn_t emac_irq(int irq, void *dev_instance);
160static void emac_clean_tx_ring(struct emac_instance *dev);
161static void __emac_set_multicast_list(struct emac_instance *dev);
162
163static inline int emac_phy_supports_gige(int phy_mode)
164{
165 return phy_mode == PHY_MODE_GMII ||
166 phy_mode == PHY_MODE_RGMII ||
167 phy_mode == PHY_MODE_TBI ||
168 phy_mode == PHY_MODE_RTBI;
169}
170
171static inline int emac_phy_gpcs(int phy_mode)
172{
173 return phy_mode == PHY_MODE_TBI ||
174 phy_mode == PHY_MODE_RTBI;
175}
176
177static inline void emac_tx_enable(struct emac_instance *dev)
178{
179 struct emac_regs __iomem *p = dev->emacp;
180 u32 r;
181
182 DBG(dev, "tx_enable" NL);
183
184 r = in_be32(&p->mr0);
185 if (!(r & EMAC_MR0_TXE))
186 out_be32(&p->mr0, r | EMAC_MR0_TXE);
187}
188
189static void emac_tx_disable(struct emac_instance *dev)
190{
191 struct emac_regs __iomem *p = dev->emacp;
192 u32 r;
193
194 DBG(dev, "tx_disable" NL);
195
196 r = in_be32(&p->mr0);
197 if (r & EMAC_MR0_TXE) {
198 int n = dev->stop_timeout;
199 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
200 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
201 udelay(1);
202 --n;
203 }
204 if (unlikely(!n))
205 emac_report_timeout_error(dev, "TX disable timeout");
206 }
207}
208
209static void emac_rx_enable(struct emac_instance *dev)
210{
211 struct emac_regs __iomem *p = dev->emacp;
212 u32 r;
213
214 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
215 goto out;
216
217 DBG(dev, "rx_enable" NL);
218
219 r = in_be32(&p->mr0);
220 if (!(r & EMAC_MR0_RXE)) {
221 if (unlikely(!(r & EMAC_MR0_RXI))) {
222 /* Wait if previous async disable is still in progress */
223 int n = dev->stop_timeout;
224 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
225 udelay(1);
226 --n;
227 }
228 if (unlikely(!n))
229 emac_report_timeout_error(dev,
230 "RX disable timeout");
231 }
232 out_be32(&p->mr0, r | EMAC_MR0_RXE);
233 }
234 out:
235 ;
236}
237
238static void emac_rx_disable(struct emac_instance *dev)
239{
240 struct emac_regs __iomem *p = dev->emacp;
241 u32 r;
242
243 DBG(dev, "rx_disable" NL);
244
245 r = in_be32(&p->mr0);
246 if (r & EMAC_MR0_RXE) {
247 int n = dev->stop_timeout;
248 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
249 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
250 udelay(1);
251 --n;
252 }
253 if (unlikely(!n))
254 emac_report_timeout_error(dev, "RX disable timeout");
255 }
256}
257
258static inline void emac_netif_stop(struct emac_instance *dev)
259{
260 netif_tx_lock_bh(dev->ndev);
261 dev->no_mcast = 1;
262 netif_tx_unlock_bh(dev->ndev);
263 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
264 mal_poll_disable(dev->mal, &dev->commac);
265 netif_tx_disable(dev->ndev);
266}
267
268static inline void emac_netif_start(struct emac_instance *dev)
269{
270 netif_tx_lock_bh(dev->ndev);
271 dev->no_mcast = 0;
272 if (dev->mcast_pending && netif_running(dev->ndev))
273 __emac_set_multicast_list(dev);
274 netif_tx_unlock_bh(dev->ndev);
275
276 netif_wake_queue(dev->ndev);
277
278 /* NOTE: unconditional netif_wake_queue is only appropriate
279 * so long as all callers are assured to have free tx slots
280 * (taken from tg3... though the case where that is wrong is
281 * not terribly harmful)
282 */
283 mal_poll_enable(dev->mal, &dev->commac);
284}
285
286static inline void emac_rx_disable_async(struct emac_instance *dev)
287{
288 struct emac_regs __iomem *p = dev->emacp;
289 u32 r;
290
291 DBG(dev, "rx_disable_async" NL);
292
293 r = in_be32(&p->mr0);
294 if (r & EMAC_MR0_RXE)
295 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
296}
297
298static int emac_reset(struct emac_instance *dev)
299{
300 struct emac_regs __iomem *p = dev->emacp;
301 int n = 20;
302
303 DBG(dev, "reset" NL);
304
305 if (!dev->reset_failed) {
306 /* 40x erratum suggests stopping RX channel before reset,
307 * we stop TX as well
308 */
309 emac_rx_disable(dev);
310 emac_tx_disable(dev);
311 }
312
313 out_be32(&p->mr0, EMAC_MR0_SRST);
314 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
315 --n;
316
317 if (n) {
318 dev->reset_failed = 0;
319 return 0;
320 } else {
321 emac_report_timeout_error(dev, "reset timeout");
322 dev->reset_failed = 1;
323 return -ETIMEDOUT;
324 }
325}
326
327static void emac_hash_mc(struct emac_instance *dev)
328{
329 struct emac_regs __iomem *p = dev->emacp;
330 u16 gaht[4] = { 0 };
331 struct dev_mc_list *dmi;
332
333 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
334
335 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
336 int bit;
337 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
338 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
339 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
340
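		/* The top 6 bits of the Ethernet CRC pick one of the 64
		 * hash bits spread across the four 16-bit GAHT registers.
		 */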
341 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
342 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
343 }
344 out_be32(&p->gaht1, gaht[0]);
345 out_be32(&p->gaht2, gaht[1]);
346 out_be32(&p->gaht3, gaht[2]);
347 out_be32(&p->gaht4, gaht[3]);
348}
349
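/* RMR filter precedence as built below: promiscuous (PME) first, then
 * "receive all multicast" (PMME, also used once more than 32 groups are
 * joined), then the GAHT hash match (MAE) when any multicast address is
 * configured. Unicast (IAE) and broadcast (BAE) are always accepted.
 */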
350static inline u32 emac_iff2rmr(struct net_device *ndev)
351{
352 struct emac_instance *dev = netdev_priv(ndev);
353 u32 r;
354
355 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
356
357 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
358 r |= EMAC4_RMR_BASE;
359 else
360 r |= EMAC_RMR_BASE;
361
362 if (ndev->flags & IFF_PROMISC)
363 r |= EMAC_RMR_PME;
364 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
365 r |= EMAC_RMR_PMME;
366 else if (ndev->mc_count > 0)
367 r |= EMAC_RMR_MAE;
368
369 return r;
370}
371
372static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
373{
374 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
375
376 DBG2(dev, "__emac_calc_base_mr1" NL);
377
378 switch(tx_size) {
379 case 2048:
380 ret |= EMAC_MR1_TFS_2K;
381 break;
382 default:
 383		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
384 dev->ndev->name, tx_size);
385 }
386
387 switch(rx_size) {
388 case 16384:
389 ret |= EMAC_MR1_RFS_16K;
390 break;
391 case 4096:
392 ret |= EMAC_MR1_RFS_4K;
393 break;
394 default:
395 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
396 dev->ndev->name, rx_size);
397 }
398
399 return ret;
400}
401
402static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
403{
404 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
405 EMAC4_MR1_OBCI(dev->opb_bus_freq);
406
407 DBG2(dev, "__emac4_calc_base_mr1" NL);
408
409 switch(tx_size) {
410 case 4096:
411 ret |= EMAC4_MR1_TFS_4K;
412 break;
413 case 2048:
414 ret |= EMAC4_MR1_TFS_2K;
415 break;
416 default:
 417		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
418 dev->ndev->name, tx_size);
419 }
420
421 switch(rx_size) {
422 case 16384:
423 ret |= EMAC4_MR1_RFS_16K;
424 break;
425 case 4096:
426 ret |= EMAC4_MR1_RFS_4K;
427 break;
428 case 2048:
429 ret |= EMAC4_MR1_RFS_2K;
430 break;
431 default:
432 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
433 dev->ndev->name, rx_size);
434 }
435
436 return ret;
437}
438
439static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
440{
441 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
442 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
443 __emac_calc_base_mr1(dev, tx_size, rx_size);
444}
445
446static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
447{
448 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
449 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
450 else
451 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
452}
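/* The TX request threshold is programmed in 64-byte units, hence the
 * ">> 6" above; e.g. a 1024-byte threshold (half of a 2K TX FIFO)
 * encodes as 15.
 */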
453
454static inline u32 emac_calc_rwmr(struct emac_instance *dev,
455 unsigned int low, unsigned int high)
456{
457 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
458 return (low << 22) | ( (high & 0x3ff) << 6);
459 else
460 return (low << 23) | ( (high & 0x1ff) << 7);
461}
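/* Worked example (values assumed for illustration): with a 4K RX FIFO
 * and 16-byte FIFO entries, low = 4096/8/16 = 32 entries and high =
 * 4096/4/16 = 64 entries, giving an EMAC4 encoding of
 * (32 << 22) | (64 << 6).
 */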
462
463static int emac_configure(struct emac_instance *dev)
464{
465 struct emac_regs __iomem *p = dev->emacp;
466 struct net_device *ndev = dev->ndev;
467 int tx_size, rx_size;
468 u32 r, mr1 = 0;
469
470 DBG(dev, "configure" NL);
471
472 if (emac_reset(dev) < 0)
473 return -ETIMEDOUT;
474
475 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
476 tah_reset(dev->tah_dev);
477
478 DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n",
479 dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
480
481 /* Default fifo sizes */
482 tx_size = dev->tx_fifo_size;
483 rx_size = dev->rx_fifo_size;
484
485 /* Check for full duplex */
486 if (dev->phy.duplex == DUPLEX_FULL)
487 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
488
489 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
490 dev->stop_timeout = STOP_TIMEOUT_10;
491 switch (dev->phy.speed) {
492 case SPEED_1000:
493 if (emac_phy_gpcs(dev->phy.mode)) {
494 mr1 |= EMAC_MR1_MF_1000GPCS |
495 EMAC_MR1_MF_IPPA(dev->phy.address);
496
497 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
498 * identify this GPCS PHY later.
499 */
500 out_be32(&p->ipcr, 0xdeadbeef);
501 } else
502 mr1 |= EMAC_MR1_MF_1000;
503
504 /* Extended fifo sizes */
505 tx_size = dev->tx_fifo_size_gige;
506 rx_size = dev->rx_fifo_size_gige;
507
508 if (dev->ndev->mtu > ETH_DATA_LEN) {
509 mr1 |= EMAC_MR1_JPSM;
510 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
511 } else
512 dev->stop_timeout = STOP_TIMEOUT_1000;
513 break;
514 case SPEED_100:
515 mr1 |= EMAC_MR1_MF_100;
516 dev->stop_timeout = STOP_TIMEOUT_100;
517 break;
518 default: /* make gcc happy */
519 break;
520 }
521
522 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
523 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
524 dev->phy.speed);
525 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
526 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
527
 528	/* An erratum on 40x forces us to NOT use integrated flow control,
529 * let's hope it works on 44x ;)
530 */
531 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
532 dev->phy.duplex == DUPLEX_FULL) {
533 if (dev->phy.pause)
534 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
535 else if (dev->phy.asym_pause)
536 mr1 |= EMAC_MR1_APP;
537 }
538
539 /* Add base settings & fifo sizes & program MR1 */
540 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
541 out_be32(&p->mr1, mr1);
542
543 /* Set individual MAC address */
544 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
545 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
546 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
547 ndev->dev_addr[5]);
548
549 /* VLAN Tag Protocol ID */
550 out_be32(&p->vtpid, 0x8100);
551
552 /* Receive mode register */
553 r = emac_iff2rmr(ndev);
554 if (r & EMAC_RMR_MAE)
555 emac_hash_mc(dev);
556 out_be32(&p->rmr, r);
557
558 /* FIFOs thresholds */
559 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
560 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
561 tx_size / 2 / dev->fifo_entry_size);
562 else
563 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
564 tx_size / 2 / dev->fifo_entry_size);
565 out_be32(&p->tmr1, r);
566 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
567
 568	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
 569	   there should still be enough space in the FIFO to allow our link
 570	   partner time to process this frame and also time to send a PAUSE
 571	   frame itself.
572
573 Here is the worst case scenario for the RX FIFO "headroom"
574 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
575
576 1) One maximum-length frame on TX 1522 bytes
577 2) One PAUSE frame time 64 bytes
578 3) PAUSE frame decode time allowance 64 bytes
579 4) One maximum-length frame on RX 1522 bytes
580 5) Round-trip propagation delay of the link (100Mb) 15 bytes
581 ----------
582 3187 bytes
583
 584	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
 585	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
586 */
587 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
588 rx_size / 4 / dev->fifo_entry_size);
589 out_be32(&p->rwmr, r);
590
591 /* Set PAUSE timer to the maximum */
592 out_be32(&p->ptr, 0xffff);
593
594 /* IRQ sources */
595 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
596 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
597 EMAC_ISR_IRE | EMAC_ISR_TE;
598 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
599 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
600 EMAC4_ISR_RXOE | */;
601 out_be32(&p->iser, r);
602
603 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
604 if (emac_phy_gpcs(dev->phy.mode))
605 emac_mii_reset_phy(&dev->phy);
606
607 return 0;
608}
609
610static void emac_reinitialize(struct emac_instance *dev)
611{
612 DBG(dev, "reinitialize" NL);
613
614 emac_netif_stop(dev);
615 if (!emac_configure(dev)) {
616 emac_tx_enable(dev);
617 emac_rx_enable(dev);
618 }
619 emac_netif_start(dev);
620}
621
622static void emac_full_tx_reset(struct emac_instance *dev)
623{
624 DBG(dev, "full_tx_reset" NL);
625
626 emac_tx_disable(dev);
627 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
628 emac_clean_tx_ring(dev);
629 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
630
631 emac_configure(dev);
632
633 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
634 emac_tx_enable(dev);
635 emac_rx_enable(dev);
636}
637
638static void emac_reset_work(struct work_struct *work)
639{
640 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
641
642 DBG(dev, "reset_work" NL);
643
644 mutex_lock(&dev->link_lock);
645 if (dev->opened) {
646 emac_netif_stop(dev);
647 emac_full_tx_reset(dev);
648 emac_netif_start(dev);
649 }
650 mutex_unlock(&dev->link_lock);
651}
652
653static void emac_tx_timeout(struct net_device *ndev)
654{
655 struct emac_instance *dev = netdev_priv(ndev);
656
657 DBG(dev, "tx_timeout" NL);
658
659 schedule_work(&dev->reset_work);
660}
661
662
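/* On most EMACs the STACR OC bit reads 1 once an MDIO operation has
 * completed; parts with the STACR_OC_INVERT quirk report the opposite
 * sense, which is what the feature test below undoes.
 */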
663static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
664{
665 int done = !!(stacr & EMAC_STACR_OC);
666
667 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
668 done = !done;
669
670 return done;
671};
672
673static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
674{
675 struct emac_regs __iomem *p = dev->emacp;
676 u32 r = 0;
677 int n, err = -ETIMEDOUT;
678
679 mutex_lock(&dev->mdio_lock);
680
681 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
682
683 /* Enable proper MDIO port */
684 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
685 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
686 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
687 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
688
689 /* Wait for management interface to become idle */
690 n = 10;
691 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
692 udelay(1);
693 if (!--n) {
694 DBG2(dev, " -> timeout wait idle\n");
695 goto bail;
696 }
697 }
698
699 /* Issue read command */
700 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
701 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
702 else
703 r = EMAC_STACR_BASE(dev->opb_bus_freq);
704 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
705 r |= EMAC_STACR_OC;
706 if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
707 r |= EMACX_STACR_STAC_READ;
708 else
709 r |= EMAC_STACR_STAC_READ;
710 r |= (reg & EMAC_STACR_PRA_MASK)
711 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
712 out_be32(&p->stacr, r);
713
714 /* Wait for read to complete */
715 n = 100;
716 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
717 udelay(1);
718 if (!--n) {
719 DBG2(dev, " -> timeout wait complete\n");
720 goto bail;
721 }
722 }
723
724 if (unlikely(r & EMAC_STACR_PHYE)) {
725 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
726 err = -EREMOTEIO;
727 goto bail;
728 }
729
730 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
731
732 DBG2(dev, "mdio_read -> %04x" NL, r);
733 err = 0;
734 bail:
735 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
736 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
737 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
738 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
739 mutex_unlock(&dev->mdio_lock);
740
741 return err == 0 ? r : err;
742}
743
744static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
745 u16 val)
746{
747 struct emac_regs __iomem *p = dev->emacp;
748 u32 r = 0;
749 int n, err = -ETIMEDOUT;
750
751 mutex_lock(&dev->mdio_lock);
752
753 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
754
755 /* Enable proper MDIO port */
756 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
757 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
758 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
759 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
760
761 /* Wait for management interface to be idle */
762 n = 10;
763 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
764 udelay(1);
765 if (!--n) {
766 DBG2(dev, " -> timeout wait idle\n");
767 goto bail;
768 }
769 }
770
771 /* Issue write command */
772 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
773 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
774 else
775 r = EMAC_STACR_BASE(dev->opb_bus_freq);
776 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
777 r |= EMAC_STACR_OC;
778 if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
779 r |= EMACX_STACR_STAC_WRITE;
780 else
781 r |= EMAC_STACR_STAC_WRITE;
782 r |= (reg & EMAC_STACR_PRA_MASK) |
783 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
784 (val << EMAC_STACR_PHYD_SHIFT);
785 out_be32(&p->stacr, r);
786
787 /* Wait for write to complete */
788 n = 100;
789 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
790 udelay(1);
791 if (!--n) {
792 DBG2(dev, " -> timeout wait complete\n");
793 goto bail;
794 }
795 }
796 err = 0;
797 bail:
798 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
799 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
800 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
801 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
802 mutex_unlock(&dev->mdio_lock);
803}
804
805static int emac_mdio_read(struct net_device *ndev, int id, int reg)
806{
807 struct emac_instance *dev = netdev_priv(ndev);
808 int res;
809
810 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
811 (u8) id, (u8) reg);
812 return res;
813}
814
815static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
816{
817 struct emac_instance *dev = netdev_priv(ndev);
818
819 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
820 (u8) id, (u8) reg, (u16) val);
821}
822
823/* Tx lock BH */
824static void __emac_set_multicast_list(struct emac_instance *dev)
825{
826 struct emac_regs __iomem *p = dev->emacp;
827 u32 rmr = emac_iff2rmr(dev->ndev);
828
829 DBG(dev, "__multicast %08x" NL, rmr);
830
831 /* I decided to relax register access rules here to avoid
832 * full EMAC reset.
833 *
834 * There is a real problem with EMAC4 core if we use MWSW_001 bit
835 * in MR1 register and do a full EMAC reset.
836 * One TX BD status update is delayed and, after EMAC reset, it
 837	 * never happens, resulting in a TX hang (it'll be recovered by the TX
838 * timeout handler eventually, but this is just gross).
839 * So we either have to do full TX reset or try to cheat here :)
840 *
841 * The only required change is to RX mode register, so I *think* all
842 * we need is just to stop RX channel. This seems to work on all
843 * tested SoCs. --ebs
844 *
845 * If we need the full reset, we might just trigger the workqueue
846 * and do it async... a bit nasty but should work --BenH
847 */
848 dev->mcast_pending = 0;
849 emac_rx_disable(dev);
850 if (rmr & EMAC_RMR_MAE)
851 emac_hash_mc(dev);
852 out_be32(&p->rmr, rmr);
853 emac_rx_enable(dev);
854}
855
856/* Tx lock BH */
857static void emac_set_multicast_list(struct net_device *ndev)
858{
859 struct emac_instance *dev = netdev_priv(ndev);
860
861 DBG(dev, "multicast" NL);
862
863 BUG_ON(!netif_running(dev->ndev));
864
865 if (dev->no_mcast) {
866 dev->mcast_pending = 1;
867 return;
868 }
869 __emac_set_multicast_list(dev);
870}
871
872static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
873{
874 int rx_sync_size = emac_rx_sync_size(new_mtu);
875 int rx_skb_size = emac_rx_skb_size(new_mtu);
876 int i, ret = 0;
877
878 mutex_lock(&dev->link_lock);
879 emac_netif_stop(dev);
880 emac_rx_disable(dev);
881 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
882
883 if (dev->rx_sg_skb) {
884 ++dev->estats.rx_dropped_resize;
885 dev_kfree_skb(dev->rx_sg_skb);
886 dev->rx_sg_skb = NULL;
887 }
888
889 /* Make a first pass over RX ring and mark BDs ready, dropping
890 * non-processed packets on the way. We need this as a separate pass
891 * to simplify error recovery in the case of allocation failure later.
892 */
893 for (i = 0; i < NUM_RX_BUFF; ++i) {
894 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
895 ++dev->estats.rx_dropped_resize;
896
897 dev->rx_desc[i].data_len = 0;
898 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
899 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
900 }
901
902 /* Reallocate RX ring only if bigger skb buffers are required */
903 if (rx_skb_size <= dev->rx_skb_size)
904 goto skip;
905
906 /* Second pass, allocate new skbs */
907 for (i = 0; i < NUM_RX_BUFF; ++i) {
908 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
909 if (!skb) {
910 ret = -ENOMEM;
911 goto oom;
912 }
913
914 BUG_ON(!dev->rx_skb[i]);
915 dev_kfree_skb(dev->rx_skb[i]);
916
917 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
918 dev->rx_desc[i].data_ptr =
919 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
920 DMA_FROM_DEVICE) + 2;
921 dev->rx_skb[i] = skb;
922 }
923 skip:
924 /* Check if we need to change "Jumbo" bit in MR1 */
925 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
926 /* This is to prevent starting RX channel in emac_rx_enable() */
927 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
928
929 dev->ndev->mtu = new_mtu;
930 emac_full_tx_reset(dev);
931 }
932
933 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
934 oom:
935 /* Restart RX */
936 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
937 dev->rx_slot = 0;
938 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
939 emac_rx_enable(dev);
940 emac_netif_start(dev);
941 mutex_unlock(&dev->link_lock);
942
943 return ret;
944}
945
946/* Process ctx, rtnl_lock semaphore */
947static int emac_change_mtu(struct net_device *ndev, int new_mtu)
948{
949 struct emac_instance *dev = netdev_priv(ndev);
950 int ret = 0;
951
952 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
953 return -EINVAL;
954
955 DBG(dev, "change_mtu(%d)" NL, new_mtu);
956
957 if (netif_running(ndev)) {
 958		/* Check if we really need to reinitialize the RX ring */
959 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
960 ret = emac_resize_rx_ring(dev, new_mtu);
961 }
962
963 if (!ret) {
964 ndev->mtu = new_mtu;
965 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
966 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
967 }
968
969 return ret;
970}
971
972static void emac_clean_tx_ring(struct emac_instance *dev)
973{
974 int i;
975
976 for (i = 0; i < NUM_TX_BUFF; ++i) {
977 if (dev->tx_skb[i]) {
978 dev_kfree_skb(dev->tx_skb[i]);
979 dev->tx_skb[i] = NULL;
980 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
981 ++dev->estats.tx_dropped;
982 }
983 dev->tx_desc[i].ctrl = 0;
984 dev->tx_desc[i].data_ptr = 0;
985 }
986}
987
988static void emac_clean_rx_ring(struct emac_instance *dev)
989{
990 int i;
991
992 for (i = 0; i < NUM_RX_BUFF; ++i)
993 if (dev->rx_skb[i]) {
994 dev->rx_desc[i].ctrl = 0;
995 dev_kfree_skb(dev->rx_skb[i]);
996 dev->rx_skb[i] = NULL;
997 dev->rx_desc[i].data_ptr = 0;
998 }
999
1000 if (dev->rx_sg_skb) {
1001 dev_kfree_skb(dev->rx_sg_skb);
1002 dev->rx_sg_skb = NULL;
1003 }
1004}
1005
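/* The +/-2 byte fiddling below keeps the IP header word-aligned: the skb
 * data pointer is bumped 2 bytes so the payload after the 14-byte
 * Ethernet header lands on a word boundary, and the DMA mapping is
 * adjusted by the same 2 bytes.
 */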
1006static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1007 gfp_t flags)
1008{
1009 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1010 if (unlikely(!skb))
1011 return -ENOMEM;
1012
1013 dev->rx_skb[slot] = skb;
1014 dev->rx_desc[slot].data_len = 0;
1015
1016 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1017 dev->rx_desc[slot].data_ptr =
1018 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1019 DMA_FROM_DEVICE) + 2;
1020 wmb();
1021 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1022 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1023
1024 return 0;
1025}
1026
1027static void emac_print_link_status(struct emac_instance *dev)
1028{
1029 if (netif_carrier_ok(dev->ndev))
1030 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1031 dev->ndev->name, dev->phy.speed,
1032 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1033 dev->phy.pause ? ", pause enabled" :
1034 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1035 else
1036 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1037}
1038
1039/* Process ctx, rtnl_lock semaphore */
1040static int emac_open(struct net_device *ndev)
1041{
1042 struct emac_instance *dev = netdev_priv(ndev);
1043 int err, i;
1044
1045 DBG(dev, "open" NL);
1046
1047 /* Setup error IRQ handler */
1048 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1049 if (err) {
1050 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1051 ndev->name, dev->emac_irq);
1052 return err;
1053 }
1054
1055 /* Allocate RX ring */
1056 for (i = 0; i < NUM_RX_BUFF; ++i)
1057 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1058 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1059 ndev->name);
1060 goto oom;
1061 }
1062
1063 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1064 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1065 dev->rx_sg_skb = NULL;
1066
1067 mutex_lock(&dev->link_lock);
 1068	dev->opened = 1;
 1069
 1070	/* Start PHY polling now.
 1071	 */
1072 if (dev->phy.address >= 0) {
1073 int link_poll_interval;
1074 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1075 dev->phy.def->ops->read_link(&dev->phy);
1076 netif_carrier_on(dev->ndev);
1077 link_poll_interval = PHY_POLL_LINK_ON;
1078 } else {
1079 netif_carrier_off(dev->ndev);
1080 link_poll_interval = PHY_POLL_LINK_OFF;
1081 }
1082 dev->link_polling = 1;
1083 wmb();
1084 schedule_delayed_work(&dev->link_work, link_poll_interval);
1085 emac_print_link_status(dev);
1086 } else
1087 netif_carrier_on(dev->ndev);
1088
1089 emac_configure(dev);
1090 mal_poll_add(dev->mal, &dev->commac);
1091 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1092 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1093 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1094 emac_tx_enable(dev);
1095 emac_rx_enable(dev);
1096 emac_netif_start(dev);
1097
1098 mutex_unlock(&dev->link_lock);
1099
1100 return 0;
1101 oom:
1102 emac_clean_rx_ring(dev);
1103 free_irq(dev->emac_irq, dev);
1104
1105 return -ENOMEM;
1106}
1107
1108/* BHs disabled */
1109#if 0
1110static int emac_link_differs(struct emac_instance *dev)
1111{
1112 u32 r = in_be32(&dev->emacp->mr1);
1113
1114 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1115 int speed, pause, asym_pause;
1116
1117 if (r & EMAC_MR1_MF_1000)
1118 speed = SPEED_1000;
1119 else if (r & EMAC_MR1_MF_100)
1120 speed = SPEED_100;
1121 else
1122 speed = SPEED_10;
1123
1124 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1125 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1126 pause = 1;
1127 asym_pause = 0;
1128 break;
1129 case EMAC_MR1_APP:
1130 pause = 0;
1131 asym_pause = 1;
1132 break;
1133 default:
1134 pause = asym_pause = 0;
1135 }
1136 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1137 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1138}
1139#endif
1140
1141static void emac_link_timer(struct work_struct *work)
1142{
1143 struct emac_instance *dev =
1144 container_of((struct delayed_work *)work,
1145 struct emac_instance, link_work);
1146 int link_poll_interval;
1147
1148 mutex_lock(&dev->link_lock);
1149 DBG2(dev, "link timer" NL);
1150
1151 if (!dev->opened)
1152 goto bail;
1153
1154 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1155 if (!netif_carrier_ok(dev->ndev)) {
1156 /* Get new link parameters */
1157 dev->phy.def->ops->read_link(&dev->phy);
1158
1159 netif_carrier_on(dev->ndev);
1160 emac_netif_stop(dev);
1161 emac_full_tx_reset(dev);
1162 emac_netif_start(dev);
1163 emac_print_link_status(dev);
1164 }
1165 link_poll_interval = PHY_POLL_LINK_ON;
1166 } else {
1167 if (netif_carrier_ok(dev->ndev)) {
1168 emac_reinitialize(dev);
1169 netif_carrier_off(dev->ndev);
1170 netif_tx_disable(dev->ndev);
1171 emac_print_link_status(dev);
1172 }
1173 link_poll_interval = PHY_POLL_LINK_OFF;
1174 }
1175 schedule_delayed_work(&dev->link_work, link_poll_interval);
 1176 bail:
1177 mutex_unlock(&dev->link_lock);
1178}
1179
1180static void emac_force_link_update(struct emac_instance *dev)
1181{
1182 netif_carrier_off(dev->ndev);
 1183	smp_rmb();
1184 if (dev->link_polling) {
1185 cancel_rearming_delayed_work(&dev->link_work);
1186 if (dev->link_polling)
1187 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1188 }
1189}
1190
1191/* Process ctx, rtnl_lock semaphore */
1192static int emac_close(struct net_device *ndev)
1193{
1194 struct emac_instance *dev = netdev_priv(ndev);
1195
1196 DBG(dev, "close" NL);
1197
1198 if (dev->phy.address >= 0) {
1199 dev->link_polling = 0;
 1200		cancel_rearming_delayed_work(&dev->link_work);
1201 }
1202 mutex_lock(&dev->link_lock);
 1203	emac_netif_stop(dev);
1204 dev->opened = 0;
1205 mutex_unlock(&dev->link_lock);
1206
1207 emac_rx_disable(dev);
1208 emac_tx_disable(dev);
1209 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1210 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1211 mal_poll_del(dev->mal, &dev->commac);
1212
1213 emac_clean_tx_ring(dev);
1214 emac_clean_rx_ring(dev);
1215
1216 free_irq(dev->emac_irq, dev);
1217
1218 return 0;
1219}
1220
1221static inline u16 emac_tx_csum(struct emac_instance *dev,
1222 struct sk_buff *skb)
1223{
 1224	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
 1225	    skb->ip_summed == CHECKSUM_PARTIAL) {
1226 ++dev->stats.tx_packets_csum;
1227 return EMAC_TX_CTRL_TAH_CSUM;
1228 }
1229 return 0;
1230}
1231
1232static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1233{
1234 struct emac_regs __iomem *p = dev->emacp;
1235 struct net_device *ndev = dev->ndev;
1236
 1237	/* Send the packet out. If this branch makes a significant perf
1238 * difference, then we can store the TMR0 value in "dev"
1239 * instead
1240 */
1241 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
 1242		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
 1243	else
1244 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1245
1246 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1247 netif_stop_queue(ndev);
1248 DBG2(dev, "stopped TX queue" NL);
1249 }
1250
1251 ndev->trans_start = jiffies;
1252 ++dev->stats.tx_packets;
1253 dev->stats.tx_bytes += len;
1254
1255 return 0;
1256}
1257
1258/* Tx lock BH */
1259static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1260{
1261 struct emac_instance *dev = netdev_priv(ndev);
1262 unsigned int len = skb->len;
1263 int slot;
1264
1265 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1266 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1267
1268 slot = dev->tx_slot++;
1269 if (dev->tx_slot == NUM_TX_BUFF) {
1270 dev->tx_slot = 0;
1271 ctrl |= MAL_TX_CTRL_WRAP;
1272 }
1273
1274 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1275
1276 dev->tx_skb[slot] = skb;
1277 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1278 skb->data, len,
1279 DMA_TO_DEVICE);
1280 dev->tx_desc[slot].data_len = (u16) len;
1281 wmb();
1282 dev->tx_desc[slot].ctrl = ctrl;
1283
1284 return emac_xmit_finish(dev, len);
1285}
1286
1287#ifdef CONFIG_IBM_NEW_EMAC_TAH
1288static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1289 u32 pd, int len, int last, u16 base_ctrl)
1290{
1291 while (1) {
1292 u16 ctrl = base_ctrl;
1293 int chunk = min(len, MAL_MAX_TX_SIZE);
1294 len -= chunk;
1295
1296 slot = (slot + 1) % NUM_TX_BUFF;
1297
1298 if (last && !len)
1299 ctrl |= MAL_TX_CTRL_LAST;
1300 if (slot == NUM_TX_BUFF - 1)
1301 ctrl |= MAL_TX_CTRL_WRAP;
1302
1303 dev->tx_skb[slot] = NULL;
1304 dev->tx_desc[slot].data_ptr = pd;
1305 dev->tx_desc[slot].data_len = (u16) chunk;
1306 dev->tx_desc[slot].ctrl = ctrl;
1307 ++dev->tx_cnt;
1308
1309 if (!len)
1310 break;
1311
1312 pd += chunk;
1313 }
1314 return slot;
1315}
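/* Illustration (assuming MAL_MAX_TX_SIZE is 4096): a 5904-byte remainder
 * passed in becomes two descriptors of 4096 and 1808 bytes, and
 * MAL_TX_CTRL_LAST is set on the final one when "last" is true.
 */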
1316
1317/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1318static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1319{
1320 struct emac_instance *dev = netdev_priv(ndev);
1321 int nr_frags = skb_shinfo(skb)->nr_frags;
1322 int len = skb->len, chunk;
1323 int slot, i;
1324 u16 ctrl;
1325 u32 pd;
1326
1327 /* This is common "fast" path */
1328 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1329 return emac_start_xmit(skb, ndev);
1330
1331 len -= skb->data_len;
1332
1333 /* Note, this is only an *estimation*, we can still run out of empty
1334 * slots because of the additional fragmentation into
1335 * MAL_MAX_TX_SIZE-sized chunks
1336 */
1337 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1338 goto stop_queue;
1339
1340 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1341 emac_tx_csum(dev, skb);
1342 slot = dev->tx_slot;
1343
1344 /* skb data */
1345 dev->tx_skb[slot] = NULL;
1346 chunk = min(len, MAL_MAX_TX_SIZE);
1347 dev->tx_desc[slot].data_ptr = pd =
1348 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1349 dev->tx_desc[slot].data_len = (u16) chunk;
1350 len -= chunk;
1351 if (unlikely(len))
1352 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1353 ctrl);
1354 /* skb fragments */
1355 for (i = 0; i < nr_frags; ++i) {
1356 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1357 len = frag->size;
1358
1359 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1360 goto undo_frame;
1361
1362 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1363 DMA_TO_DEVICE);
1364
1365 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1366 ctrl);
1367 }
1368
1369 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1370
1371 /* Attach skb to the last slot so we don't release it too early */
1372 dev->tx_skb[slot] = skb;
1373
1374 /* Send the packet out */
1375 if (dev->tx_slot == NUM_TX_BUFF - 1)
1376 ctrl |= MAL_TX_CTRL_WRAP;
1377 wmb();
1378 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1379 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1380
1381 return emac_xmit_finish(dev, skb->len);
1382
1383 undo_frame:
1384 /* Well, too bad. Our previous estimation was overly optimistic.
1385 * Undo everything.
1386 */
1387 while (slot != dev->tx_slot) {
1388 dev->tx_desc[slot].ctrl = 0;
1389 --dev->tx_cnt;
1390 if (--slot < 0)
1391 slot = NUM_TX_BUFF - 1;
1392 }
1393 ++dev->estats.tx_undo;
1394
1395 stop_queue:
1396 netif_stop_queue(ndev);
1397 DBG2(dev, "stopped TX queue" NL);
1398 return 1;
1399}
1400#else
1401# define emac_start_xmit_sg emac_start_xmit
1402#endif /* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
1403
1404/* Tx lock BHs */
1405static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1406{
1407 struct emac_error_stats *st = &dev->estats;
1408
1409 DBG(dev, "BD TX error %04x" NL, ctrl);
1410
1411 ++st->tx_bd_errors;
1412 if (ctrl & EMAC_TX_ST_BFCS)
1413 ++st->tx_bd_bad_fcs;
1414 if (ctrl & EMAC_TX_ST_LCS)
1415 ++st->tx_bd_carrier_loss;
1416 if (ctrl & EMAC_TX_ST_ED)
1417 ++st->tx_bd_excessive_deferral;
1418 if (ctrl & EMAC_TX_ST_EC)
1419 ++st->tx_bd_excessive_collisions;
1420 if (ctrl & EMAC_TX_ST_LC)
1421 ++st->tx_bd_late_collision;
1422 if (ctrl & EMAC_TX_ST_MC)
1423 ++st->tx_bd_multple_collisions;
1424 if (ctrl & EMAC_TX_ST_SC)
1425 ++st->tx_bd_single_collision;
1426 if (ctrl & EMAC_TX_ST_UR)
1427 ++st->tx_bd_underrun;
1428 if (ctrl & EMAC_TX_ST_SQE)
1429 ++st->tx_bd_sqe;
1430}
1431
1432static void emac_poll_tx(void *param)
1433{
1434 struct emac_instance *dev = param;
1435 u32 bad_mask;
1436
1437 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1438
1439 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1440 bad_mask = EMAC_IS_BAD_TX_TAH;
1441 else
1442 bad_mask = EMAC_IS_BAD_TX;
1443
1444 netif_tx_lock_bh(dev->ndev);
1445 if (dev->tx_cnt) {
1446 u16 ctrl;
1447 int slot = dev->ack_slot, n = 0;
1448 again:
1449 ctrl = dev->tx_desc[slot].ctrl;
1450 if (!(ctrl & MAL_TX_CTRL_READY)) {
1451 struct sk_buff *skb = dev->tx_skb[slot];
1452 ++n;
1453
1454 if (skb) {
1455 dev_kfree_skb(skb);
1456 dev->tx_skb[slot] = NULL;
1457 }
1458 slot = (slot + 1) % NUM_TX_BUFF;
1459
1460 if (unlikely(ctrl & bad_mask))
1461 emac_parse_tx_error(dev, ctrl);
1462
1463 if (--dev->tx_cnt)
1464 goto again;
1465 }
1466 if (n) {
1467 dev->ack_slot = slot;
1468 if (netif_queue_stopped(dev->ndev) &&
1469 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1470 netif_wake_queue(dev->ndev);
1471
1472 DBG2(dev, "tx %d pkts" NL, n);
1473 }
1474 }
1475 netif_tx_unlock_bh(dev->ndev);
1476}
1477
1478static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1479 int len)
1480{
1481 struct sk_buff *skb = dev->rx_skb[slot];
1482
1483 DBG2(dev, "recycle %d %d" NL, slot, len);
1484
1485 if (len)
1486 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1487 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1488
1489 dev->rx_desc[slot].data_len = 0;
1490 wmb();
1491 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1492 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1493}
1494
1495static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1496{
1497 struct emac_error_stats *st = &dev->estats;
1498
1499 DBG(dev, "BD RX error %04x" NL, ctrl);
1500
1501 ++st->rx_bd_errors;
1502 if (ctrl & EMAC_RX_ST_OE)
1503 ++st->rx_bd_overrun;
1504 if (ctrl & EMAC_RX_ST_BP)
1505 ++st->rx_bd_bad_packet;
1506 if (ctrl & EMAC_RX_ST_RP)
1507 ++st->rx_bd_runt_packet;
1508 if (ctrl & EMAC_RX_ST_SE)
1509 ++st->rx_bd_short_event;
1510 if (ctrl & EMAC_RX_ST_AE)
1511 ++st->rx_bd_alignment_error;
1512 if (ctrl & EMAC_RX_ST_BFCS)
1513 ++st->rx_bd_bad_fcs;
1514 if (ctrl & EMAC_RX_ST_PTL)
1515 ++st->rx_bd_packet_too_long;
1516 if (ctrl & EMAC_RX_ST_ORE)
1517 ++st->rx_bd_out_of_range;
1518 if (ctrl & EMAC_RX_ST_IRE)
1519 ++st->rx_bd_in_range;
1520}
1521
1522static inline void emac_rx_csum(struct emac_instance *dev,
1523 struct sk_buff *skb, u16 ctrl)
1524{
1525#ifdef CONFIG_IBM_NEW_EMAC_TAH
1526 if (!ctrl && dev->tah_dev) {
1527 skb->ip_summed = CHECKSUM_UNNECESSARY;
1528 ++dev->stats.rx_packets_csum;
1529 }
1530#endif
1531}
1532
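/* rx_sg_skb accumulates a frame spanning several RX descriptors: the
 * FIRST descriptor's skb is taken over in emac_poll_rx(), each following
 * descriptor's data is copied onto its tail here, and the LAST
 * descriptor pushes the assembled skb up the stack.
 */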
1533static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1534{
1535 if (likely(dev->rx_sg_skb != NULL)) {
1536 int len = dev->rx_desc[slot].data_len;
1537 int tot_len = dev->rx_sg_skb->len + len;
1538
1539 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1540 ++dev->estats.rx_dropped_mtu;
1541 dev_kfree_skb(dev->rx_sg_skb);
1542 dev->rx_sg_skb = NULL;
1543 } else {
 1544			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1545 dev->rx_skb[slot]->data, len);
1546 skb_put(dev->rx_sg_skb, len);
1547 emac_recycle_rx_skb(dev, slot, len);
1548 return 0;
1549 }
1550 }
1551 emac_recycle_rx_skb(dev, slot, 0);
1552 return -1;
1553}
1554
1555/* NAPI poll context */
1556static int emac_poll_rx(void *param, int budget)
1557{
1558 struct emac_instance *dev = param;
1559 int slot = dev->rx_slot, received = 0;
1560
1561 DBG2(dev, "poll_rx(%d)" NL, budget);
1562
1563 again:
1564 while (budget > 0) {
1565 int len;
1566 struct sk_buff *skb;
1567 u16 ctrl = dev->rx_desc[slot].ctrl;
1568
1569 if (ctrl & MAL_RX_CTRL_EMPTY)
1570 break;
1571
1572 skb = dev->rx_skb[slot];
1573 mb();
1574 len = dev->rx_desc[slot].data_len;
1575
1576 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1577 goto sg;
1578
1579 ctrl &= EMAC_BAD_RX_MASK;
1580 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1581 emac_parse_rx_error(dev, ctrl);
1582 ++dev->estats.rx_dropped_error;
1583 emac_recycle_rx_skb(dev, slot, 0);
1584 len = 0;
1585 goto next;
1586 }
1587
1588 if (len && len < EMAC_RX_COPY_THRESH) {
1589 struct sk_buff *copy_skb =
1590 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1591 if (unlikely(!copy_skb))
1592 goto oom;
1593
1594 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1595 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1596 len + 2);
1597 emac_recycle_rx_skb(dev, slot, len);
1598 skb = copy_skb;
1599 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1600 goto oom;
1601
1602 skb_put(skb, len);
1603 push_packet:
1604 skb->dev = dev->ndev;
1605 skb->protocol = eth_type_trans(skb, dev->ndev);
1606 emac_rx_csum(dev, skb, ctrl);
1607
1608 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1609 ++dev->estats.rx_dropped_stack;
1610 next:
1611 ++dev->stats.rx_packets;
1612 skip:
1613 dev->stats.rx_bytes += len;
1614 slot = (slot + 1) % NUM_RX_BUFF;
1615 --budget;
1616 ++received;
1617 continue;
1618 sg:
1619 if (ctrl & MAL_RX_CTRL_FIRST) {
1620 BUG_ON(dev->rx_sg_skb);
1621 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1622 DBG(dev, "rx OOM %d" NL, slot);
1623 ++dev->estats.rx_dropped_oom;
1624 emac_recycle_rx_skb(dev, slot, 0);
1625 } else {
1626 dev->rx_sg_skb = skb;
1627 skb_put(skb, len);
1628 }
1629 } else if (!emac_rx_sg_append(dev, slot) &&
1630 (ctrl & MAL_RX_CTRL_LAST)) {
1631
1632 skb = dev->rx_sg_skb;
1633 dev->rx_sg_skb = NULL;
1634
1635 ctrl &= EMAC_BAD_RX_MASK;
1636 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1637 emac_parse_rx_error(dev, ctrl);
1638 ++dev->estats.rx_dropped_error;
1639 dev_kfree_skb(skb);
1640 len = 0;
1641 } else
1642 goto push_packet;
1643 }
1644 goto skip;
1645 oom:
1646 DBG(dev, "rx OOM %d" NL, slot);
1647 /* Drop the packet and recycle skb */
1648 ++dev->estats.rx_dropped_oom;
1649 emac_recycle_rx_skb(dev, slot, 0);
1650 goto next;
1651 }
1652
1653 if (received) {
1654 DBG2(dev, "rx %d BDs" NL, received);
1655 dev->rx_slot = slot;
1656 }
1657
1658 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1659 mb();
1660 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1661 DBG2(dev, "rx restart" NL);
1662 received = 0;
1663 goto again;
1664 }
1665
1666 if (dev->rx_sg_skb) {
1667 DBG2(dev, "dropping partial rx packet" NL);
1668 ++dev->estats.rx_dropped_error;
1669 dev_kfree_skb(dev->rx_sg_skb);
1670 dev->rx_sg_skb = NULL;
1671 }
1672
1673 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1674 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1675 emac_rx_enable(dev);
1676 dev->rx_slot = 0;
1677 }
1678 return received;
1679}
1680
1681/* NAPI poll context */
1682static int emac_peek_rx(void *param)
1683{
1684 struct emac_instance *dev = param;
1685
1686 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1687}
1688
1689/* NAPI poll context */
1690static int emac_peek_rx_sg(void *param)
1691{
1692 struct emac_instance *dev = param;
1693
1694 int slot = dev->rx_slot;
1695 while (1) {
1696 u16 ctrl = dev->rx_desc[slot].ctrl;
1697 if (ctrl & MAL_RX_CTRL_EMPTY)
1698 return 0;
1699 else if (ctrl & MAL_RX_CTRL_LAST)
1700 return 1;
1701
1702 slot = (slot + 1) % NUM_RX_BUFF;
1703
1704 /* I'm just being paranoid here :) */
1705 if (unlikely(slot == dev->rx_slot))
1706 return 0;
1707 }
1708}
1709
1710/* Hard IRQ */
1711static void emac_rxde(void *param)
1712{
1713 struct emac_instance *dev = param;
1714
1715 ++dev->estats.rx_stopped;
1716 emac_rx_disable_async(dev);
1717}
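/* An RX descriptor error means MAL has stopped the channel: count it and
 * disable RX without busy-waiting. emac_poll_rx() later notices the
 * MAL_COMMAC_RX_STOPPED flag (set by the MAL layer) and restarts the
 * channel once empty descriptors are available again.
 */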
1718
1719/* Hard IRQ */
1720static irqreturn_t emac_irq(int irq, void *dev_instance)
1721{
1722 struct emac_instance *dev = dev_instance;
1723 struct emac_regs __iomem *p = dev->emacp;
1724 struct emac_error_stats *st = &dev->estats;
1725 u32 isr;
1726
1727 spin_lock(&dev->lock);
1728
1729 isr = in_be32(&p->isr);
1730 out_be32(&p->isr, isr);
1731
1732 DBG(dev, "isr = %08x" NL, isr);
1733
1734 if (isr & EMAC4_ISR_TXPE)
1735 ++st->tx_parity;
1736 if (isr & EMAC4_ISR_RXPE)
1737 ++st->rx_parity;
1738 if (isr & EMAC4_ISR_TXUE)
1739 ++st->tx_underrun;
1740 if (isr & EMAC4_ISR_RXOE)
1741 ++st->rx_fifo_overrun;
1742 if (isr & EMAC_ISR_OVR)
1743 ++st->rx_overrun;
1744 if (isr & EMAC_ISR_BP)
1745 ++st->rx_bad_packet;
1746 if (isr & EMAC_ISR_RP)
1747 ++st->rx_runt_packet;
1748 if (isr & EMAC_ISR_SE)
1749 ++st->rx_short_event;
1750 if (isr & EMAC_ISR_ALE)
1751 ++st->rx_alignment_error;
1752 if (isr & EMAC_ISR_BFCS)
1753 ++st->rx_bad_fcs;
1754 if (isr & EMAC_ISR_PTLE)
1755 ++st->rx_packet_too_long;
1756 if (isr & EMAC_ISR_ORE)
1757 ++st->rx_out_of_range;
1758 if (isr & EMAC_ISR_IRE)
1759 ++st->rx_in_range;
1760 if (isr & EMAC_ISR_SQE)
1761 ++st->tx_sqe;
1762 if (isr & EMAC_ISR_TE)
1763 ++st->tx_errors;
1764
1765 spin_unlock(&dev->lock);
1766
1767 return IRQ_HANDLED;
1768}
1769
1770static struct net_device_stats *emac_stats(struct net_device *ndev)
1771{
1772 struct emac_instance *dev = netdev_priv(ndev);
1773 struct emac_stats *st = &dev->stats;
1774 struct emac_error_stats *est = &dev->estats;
1775 struct net_device_stats *nst = &dev->nstats;
1776 unsigned long flags;
1777
1778 DBG2(dev, "stats" NL);
1779
1780 /* Compute "legacy" statistics */
1781 spin_lock_irqsave(&dev->lock, flags);
1782 nst->rx_packets = (unsigned long)st->rx_packets;
1783 nst->rx_bytes = (unsigned long)st->rx_bytes;
1784 nst->tx_packets = (unsigned long)st->tx_packets;
1785 nst->tx_bytes = (unsigned long)st->tx_bytes;
1786 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1787 est->rx_dropped_error +
1788 est->rx_dropped_resize +
1789 est->rx_dropped_mtu);
1790 nst->tx_dropped = (unsigned long)est->tx_dropped;
1791
1792 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1793 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1794 est->rx_fifo_overrun +
1795 est->rx_overrun);
1796 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1797 est->rx_alignment_error);
1798 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1799 est->rx_bad_fcs);
1800 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1801 est->rx_bd_short_event +
1802 est->rx_bd_packet_too_long +
1803 est->rx_bd_out_of_range +
1804 est->rx_bd_in_range +
1805 est->rx_runt_packet +
1806 est->rx_short_event +
1807 est->rx_packet_too_long +
1808 est->rx_out_of_range +
1809 est->rx_in_range);
1810
1811 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1812 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1813 est->tx_underrun);
1814 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1815 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1816 est->tx_bd_excessive_collisions +
1817 est->tx_bd_late_collision +
1818 est->tx_bd_multple_collisions);
1819 spin_unlock_irqrestore(&dev->lock, flags);
1820 return nst;
1821}
1822
1823static struct mal_commac_ops emac_commac_ops = {
1824 .poll_tx = &emac_poll_tx,
1825 .poll_rx = &emac_poll_rx,
1826 .peek_rx = &emac_peek_rx,
1827 .rxde = &emac_rxde,
1828};
1829
1830static struct mal_commac_ops emac_commac_sg_ops = {
1831 .poll_tx = &emac_poll_tx,
1832 .poll_rx = &emac_poll_rx,
1833 .peek_rx = &emac_peek_rx_sg,
1834 .rxde = &emac_rxde,
1835};
1836
1837/* Ethtool support */
1838static int emac_ethtool_get_settings(struct net_device *ndev,
1839 struct ethtool_cmd *cmd)
1840{
1841 struct emac_instance *dev = netdev_priv(ndev);
1842
1843 cmd->supported = dev->phy.features;
1844 cmd->port = PORT_MII;
1845 cmd->phy_address = dev->phy.address;
1846 cmd->transceiver =
1847 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1848
1849 mutex_lock(&dev->link_lock);
1850 cmd->advertising = dev->phy.advertising;
1851 cmd->autoneg = dev->phy.autoneg;
1852 cmd->speed = dev->phy.speed;
1853 cmd->duplex = dev->phy.duplex;
1854 mutex_unlock(&dev->link_lock);
1855
1856 return 0;
1857}
1858
1859static int emac_ethtool_set_settings(struct net_device *ndev,
1860 struct ethtool_cmd *cmd)
1861{
1862 struct emac_instance *dev = netdev_priv(ndev);
1863 u32 f = dev->phy.features;
1864
1865 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1866 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1867
1868 /* Basic sanity checks */
1869 if (dev->phy.address < 0)
1870 return -EOPNOTSUPP;
1871 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1872 return -EINVAL;
1873 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1874 return -EINVAL;
1875 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1876 return -EINVAL;
1877
1878 if (cmd->autoneg == AUTONEG_DISABLE) {
1879 switch (cmd->speed) {
1880 case SPEED_10:
1881 if (cmd->duplex == DUPLEX_HALF
1882 && !(f & SUPPORTED_10baseT_Half))
1883 return -EINVAL;
1884 if (cmd->duplex == DUPLEX_FULL
1885 && !(f & SUPPORTED_10baseT_Full))
1886 return -EINVAL;
1887 break;
1888 case SPEED_100:
1889 if (cmd->duplex == DUPLEX_HALF
1890 && !(f & SUPPORTED_100baseT_Half))
1891 return -EINVAL;
1892 if (cmd->duplex == DUPLEX_FULL
1893 && !(f & SUPPORTED_100baseT_Full))
1894 return -EINVAL;
1895 break;
1896 case SPEED_1000:
1897 if (cmd->duplex == DUPLEX_HALF
1898 && !(f & SUPPORTED_1000baseT_Half))
1899 return -EINVAL;
1900 if (cmd->duplex == DUPLEX_FULL
1901 && !(f & SUPPORTED_1000baseT_Full))
1902 return -EINVAL;
1903 break;
1904 default:
1905 return -EINVAL;
1906 }
1907
1908 mutex_lock(&dev->link_lock);
1909 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1910 cmd->duplex);
1911 mutex_unlock(&dev->link_lock);
1912
1913 } else {
1914 if (!(f & SUPPORTED_Autoneg))
1915 return -EINVAL;
1916
1917 mutex_lock(&dev->link_lock);
1918 dev->phy.def->ops->setup_aneg(&dev->phy,
1919 (cmd->advertising & f) |
1920 (dev->phy.advertising &
1921 (ADVERTISED_Pause |
1922 ADVERTISED_Asym_Pause)));
1923 mutex_unlock(&dev->link_lock);
1924 }
1925 emac_force_link_update(dev);
1926
1927 return 0;
1928}

static void emac_ethtool_get_ringparam(struct net_device *ndev,
                                       struct ethtool_ringparam *rp)
{
        rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
        rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
                                        struct ethtool_pauseparam *pp)
{
        struct emac_instance *dev = netdev_priv(ndev);

        mutex_lock(&dev->link_lock);
        if ((dev->phy.features & SUPPORTED_Autoneg) &&
            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
                pp->autoneg = 1;

        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        pp->rx_pause = pp->tx_pause = 1;
                else if (dev->phy.asym_pause)
                        pp->tx_pause = 1;
        }
        mutex_unlock(&dev->link_lock);
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);

        return dev->tah_dev != NULL;
}

static int emac_get_regs_len(struct emac_instance *dev)
{
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                return sizeof(struct emac_ethtool_regs_subhdr) +
                        EMAC4_ETHTOOL_REGS_SIZE;
        else
                return sizeof(struct emac_ethtool_regs_subhdr) +
                        EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int size;

        size = sizeof(struct emac_ethtool_regs_hdr) +
                emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                size += zmii_get_regs_len(dev->zmii_dev);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                size += rgmii_get_regs_len(dev->rgmii_dev);
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                size += tah_get_regs_len(dev->tah_dev);

        return size;
}

static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;

        hdr->index = dev->cell_index;
        if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
                hdr->version = EMAC4_ETHTOOL_REGS_VER;
                memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
                return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
        } else {
                hdr->version = EMAC_ETHTOOL_REGS_VER;
                memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
                return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
        }
}

static void emac_ethtool_get_regs(struct net_device *ndev,
                                  struct ethtool_regs *regs, void *buf)
{
        struct emac_instance *dev = netdev_priv(ndev);
        struct emac_ethtool_regs_hdr *hdr = buf;

        hdr->components = 0;
        buf = hdr + 1;

        buf = mal_dump_regs(dev->mal, buf);
        buf = emac_dump_regs(dev, buf);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
                hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
                buf = zmii_dump_regs(dev->zmii_dev, buf);
        }
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
                hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
                buf = rgmii_dump_regs(dev->rgmii_dev, buf);
        }
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
                hdr->components |= EMAC_ETHTOOL_REGS_TAH;
                buf = tah_dump_regs(dev->tah_dev, buf);
        }
}
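
/* The register dump produced above is laid out as follows (a sketch of
 * what the code emits; actual sizes depend on the feature set):
 *
 *   struct emac_ethtool_regs_hdr     components bitmask
 *   MAL registers                    mal_dump_regs()
 *   subhdr + EMAC/EMAC4 registers    emac_dump_regs()
 *   [ZMII registers]                 only if EMAC_FTR_HAS_ZMII
 *   [RGMII registers]                only if EMAC_FTR_HAS_RGMII
 *   [TAH registers]                  only if EMAC_FTR_HAS_TAH
 */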

static int emac_ethtool_nway_reset(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int res = 0;

        DBG(dev, "nway_reset" NL);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        mutex_lock(&dev->link_lock);
        if (!dev->phy.autoneg) {
                res = -EINVAL;
                goto out;
        }

        dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
        mutex_unlock(&dev->link_lock);
        emac_force_link_update(dev);
        return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
        return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
                                     u8 *buf)
{
        if (stringset == ETH_SS_STATS)
                memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
                                           struct ethtool_stats *estats,
                                           u64 *tmp_stats)
{
        struct emac_instance *dev = netdev_priv(ndev);

        memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
        tmp_stats += sizeof(dev->stats) / sizeof(u64);
        memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
                                     struct ethtool_drvinfo *info)
{
        struct emac_instance *dev = netdev_priv(ndev);

        strcpy(info->driver, "ibm_emac");
        strcpy(info->version, DRV_VERSION);
        info->fw_version[0] = '\0';
        sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
                dev->cell_index, dev->ofdev->node->full_name);
        info->n_stats = emac_ethtool_get_stats_count(ndev);
        info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

static const struct ethtool_ops emac_ethtool_ops = {
        .get_settings = emac_ethtool_get_settings,
        .set_settings = emac_ethtool_set_settings,
        .get_drvinfo = emac_ethtool_get_drvinfo,

        .get_regs_len = emac_ethtool_get_regs_len,
        .get_regs = emac_ethtool_get_regs,

        .nway_reset = emac_ethtool_nway_reset,

        .get_ringparam = emac_ethtool_get_ringparam,
        .get_pauseparam = emac_ethtool_get_pauseparam,

        .get_rx_csum = emac_ethtool_get_rx_csum,

        .get_strings = emac_ethtool_get_strings,
        .get_stats_count = emac_ethtool_get_stats_count,
        .get_ethtool_stats = emac_ethtool_get_ethtool_stats,

        .get_link = ethtool_op_get_link,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .get_sg = ethtool_op_get_sg,
};

static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct emac_instance *dev = netdev_priv(ndev);
        uint16_t *data = (uint16_t *)&rq->ifr_ifru;

        DBG(dev, "ioctl %08x" NL, cmd);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCDEVPRIVATE:
                data[0] = dev->phy.address;
                /* Fall through */
        case SIOCGMIIREG:
        case SIOCDEVPRIVATE + 1:
                data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
                return 0;

        case SIOCSMIIREG:
        case SIOCDEVPRIVATE + 2:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
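
/* Note: the u16 array above overlays struct mii_ioctl_data, whose layout
 * in <linux/mii.h> is { phy_id, reg_num, val_in, val_out }: data[0] is the
 * PHY address, data[1] the register number, data[2] the value to write and
 * data[3] the value read back. The SIOCDEVPRIVATE aliases are kept for old
 * userspace that predates the standard MII ioctls.
 */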

struct emac_depentry {
        u32 phandle;
        struct device_node *node;
        struct of_device *ofdev;
        void *drvdata;
};

#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
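
/* Each entry tracks one device this EMAC depends on (MAL, ZMII, RGMII,
 * TAH, MDIO and the previous EMAC in the boot list), keyed by a phandle
 * from the device-tree, e.g. a "mal-device = <&MAL0>;" property (the
 * &MAL0 label is illustrative). Probing blocks in emac_wait_deps() below
 * until every referenced device has been bound by its driver.
 */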

static int __devinit emac_check_deps(struct emac_instance *dev,
                                     struct emac_depentry *deps)
{
        int i, there = 0;
        struct device_node *np;

        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                /* no dependency on that item, all right */
                if (deps[i].phandle == 0) {
                        there++;
                        continue;
                }
                /* special case for blist as the dependency might go away */
                if (i == EMAC_DEP_PREV_IDX) {
                        np = *(dev->blist - 1);
                        if (np == NULL) {
                                deps[i].phandle = 0;
                                there++;
                                continue;
                        }
                        if (deps[i].node == NULL)
                                deps[i].node = of_node_get(np);
                }
                if (deps[i].node == NULL)
                        deps[i].node = of_find_node_by_phandle(deps[i].phandle);
                if (deps[i].node == NULL)
                        continue;
                if (deps[i].ofdev == NULL)
                        deps[i].ofdev = of_find_device_by_node(deps[i].node);
                if (deps[i].ofdev == NULL)
                        continue;
                if (deps[i].drvdata == NULL)
                        deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
                if (deps[i].drvdata != NULL)
                        there++;
        }
        return (there == EMAC_DEP_COUNT);
}

static void emac_put_deps(struct emac_instance *dev)
{
        if (dev->mal_dev)
                of_dev_put(dev->mal_dev);
        if (dev->zmii_dev)
                of_dev_put(dev->zmii_dev);
        if (dev->rgmii_dev)
                of_dev_put(dev->rgmii_dev);
        if (dev->mdio_dev)
                of_dev_put(dev->mdio_dev);
        if (dev->tah_dev)
                of_dev_put(dev->tah_dev);
}

static int __devinit emac_of_bus_notify(struct notifier_block *nb,
                                        unsigned long action, void *data)
{
        /* We are only interested in devices being bound to a driver */
        if (action == BUS_NOTIFY_BOUND_DRIVER)
                wake_up_all(&emac_probe_wait);
        return 0;
}

static struct notifier_block emac_of_bus_notifier = {
        .notifier_call = emac_of_bus_notify
};
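
/* Sleep until emac_check_deps() reports all dependencies present, or
 * EMAC_PROBE_DEP_TIMEOUT expires. The bus notifier above pokes the wait
 * queue every time any of_platform device gets bound to a driver, which
 * is exactly when a missing dependency can become available.
 */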

static int __devinit emac_wait_deps(struct emac_instance *dev)
{
        struct emac_depentry deps[EMAC_DEP_COUNT];
        int i, err;

        memset(&deps, 0, sizeof(deps));

        deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
        deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
        deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
        if (dev->tah_ph)
                deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
        if (dev->mdio_ph)
                deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
        if (dev->blist && dev->blist > emac_boot_list)
                deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
        bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
        wait_event_timeout(emac_probe_wait,
                           emac_check_deps(dev, deps),
                           EMAC_PROBE_DEP_TIMEOUT);
        bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
        err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                if (deps[i].node)
                        of_node_put(deps[i].node);
                if (err && deps[i].ofdev)
                        of_dev_put(deps[i].ofdev);
        }
        if (err == 0) {
                dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
                dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
                dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
                dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
                dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
        }
        if (deps[EMAC_DEP_PREV_IDX].ofdev)
                of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
        return err;
}

static int __devinit emac_read_uint_prop(struct device_node *np,
                                         const char *name, u32 *val, int fatal)
{
        int len;
        const u32 *prop = of_get_property(np, name, &len);
        if (prop == NULL || len < sizeof(u32)) {
                if (fatal)
                        printk(KERN_ERR "%s: missing %s property\n",
                               np->full_name, name);
                return -ENODEV;
        }
        *val = *prop;
        return 0;
}

static int __devinit emac_init_phy(struct emac_instance *dev)
{
        struct device_node *np = dev->ofdev->node;
        struct net_device *ndev = dev->ndev;
        u32 phy_map, adv;
        int i;

        dev->phy.dev = ndev;
        dev->phy.mode = dev->phy_mode;

        /* PHY-less configuration.
         * XXX I probably should move these settings to the dev tree
         */
        if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
                emac_reset(dev);

                dev->phy.address = -1;
                dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
                dev->phy.pause = 1;

                return 0;
        }

        mutex_lock(&emac_phy_map_lock);
        phy_map = dev->phy_map | busy_phy_map;

        DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

        dev->phy.mdio_read = emac_mdio_read;
        dev->phy.mdio_write = emac_mdio_write;

        /* Configure EMAC with defaults so we can at least use MDIO
         * This is needed mostly for 440GX
         */
        if (emac_phy_gpcs(dev->phy.mode)) {
                /* XXX
                 * Make GPCS PHY address equal to EMAC index.
                 * We probably should take into account busy_phy_map
                 * and/or phy_map here.
                 *
                 * Note that the busy_phy_map is currently global
                 * while it should probably be per-ASIC...
                 */
                dev->phy.address = dev->cell_index;
        }

        emac_configure(dev);

        if (dev->phy_address != 0xffffffff)
                phy_map = ~(1 << dev->phy_address);

        for (i = 0; i < 0x20; phy_map >>= 1, ++i)
                if (!(phy_map & 1)) {
                        int r;
                        busy_phy_map |= 1 << i;

                        /* Quick check if there is a PHY at the address */
                        r = emac_mdio_read(dev->ndev, i, MII_BMCR);
                        if (r == 0xffff || r < 0)
                                continue;
                        if (!emac_mii_phy_probe(&dev->phy, i))
                                break;
                }
        mutex_unlock(&emac_phy_map_lock);
        if (i == 0x20) {
                printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
                return -ENXIO;
        }

        /* Init PHY */
        if (dev->phy.def->ops->init)
                dev->phy.def->ops->init(&dev->phy);

        /* Disable any PHY features not supported by the platform */
        dev->phy.def->features &= ~dev->phy_feat_exc;

        /* Setup initial link parameters */
        if (dev->phy.features & SUPPORTED_Autoneg) {
                adv = dev->phy.features;
                if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
                        adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
                /* Restart autonegotiation */
                dev->phy.def->ops->setup_aneg(&dev->phy, adv);
        } else {
                u32 f = dev->phy.def->features;
                int speed = SPEED_10, fd = DUPLEX_HALF;

                /* Select highest supported speed/duplex */
                if (f & SUPPORTED_1000baseT_Full) {
                        speed = SPEED_1000;
                        fd = DUPLEX_FULL;
                } else if (f & SUPPORTED_1000baseT_Half)
                        speed = SPEED_1000;
                else if (f & SUPPORTED_100baseT_Full) {
                        speed = SPEED_100;
                        fd = DUPLEX_FULL;
                } else if (f & SUPPORTED_100baseT_Half)
                        speed = SPEED_100;
                else if (f & SUPPORTED_10baseT_Full)
                        fd = DUPLEX_FULL;

                /* Force link parameters */
                dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
        }
        return 0;
}
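
/* Worked example of the scan above: a set bit in phy_map means "skip this
 * address", so a phy-map of 0xfffffffd leaves only address 1 to probe,
 * and a fixed phy-address of 5 turns the mask into ~(1 << 5) so only
 * address 5 is probed. In the phy-map case, addresses already claimed by
 * other EMACs (busy_phy_map) are skipped as well.
 */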

static int __devinit emac_init_config(struct emac_instance *dev)
{
        struct device_node *np = dev->ofdev->node;
        const void *p;
        unsigned int plen;
        const char *pm, *phy_modes[] = {
                [PHY_MODE_NA] = "",
                [PHY_MODE_MII] = "mii",
                [PHY_MODE_RMII] = "rmii",
                [PHY_MODE_SMII] = "smii",
                [PHY_MODE_RGMII] = "rgmii",
                [PHY_MODE_TBI] = "tbi",
                [PHY_MODE_GMII] = "gmii",
                [PHY_MODE_RTBI] = "rtbi",
                [PHY_MODE_SGMII] = "sgmii",
        };

        /* Read config from device-tree */
        if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
                dev->max_mtu = 1500;
        if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
                dev->rx_fifo_size = 2048;
        if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
                dev->tx_fifo_size = 2048;
        if (emac_read_uint_prop(np, "rx-fifo-size-gige",
                                &dev->rx_fifo_size_gige, 0))
                dev->rx_fifo_size_gige = dev->rx_fifo_size;
        if (emac_read_uint_prop(np, "tx-fifo-size-gige",
                                &dev->tx_fifo_size_gige, 0))
                dev->tx_fifo_size_gige = dev->tx_fifo_size;
        if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
                dev->phy_address = 0xffffffff;
        if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
                dev->phy_map = 0xffffffff;
        if (emac_read_uint_prop(np->parent, "clock-frequency",
                                &dev->opb_bus_freq, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
                dev->tah_ph = 0;
        if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
                dev->tah_port = 0;
        if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
                dev->mdio_ph = 0;
        if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
                dev->zmii_ph = 0;
        if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
                dev->zmii_port = 0xffffffff;
        if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
                dev->rgmii_ph = 0;
        if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
                dev->rgmii_port = 0xffffffff;
        if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
                dev->fifo_entry_size = 16;
        if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
                dev->mal_burst_size = 256;

        /* PHY mode needs some decoding */
        dev->phy_mode = PHY_MODE_NA;
        pm = of_get_property(np, "phy-mode", &plen);
        if (pm != NULL) {
                int i;
                for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
                        if (!strcasecmp(pm, phy_modes[i])) {
                                dev->phy_mode = i;
                                break;
                        }
        }

        /* Backward compat with non-final DT */
        if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
                u32 nmode = *(const u32 *)pm;
                if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
                        dev->phy_mode = nmode;
        }

        /* Check EMAC version */
        if (of_device_is_compatible(np, "ibm,emac4"))
                dev->features |= EMAC_FTR_EMAC4;
        if (of_device_is_compatible(np, "ibm,emac-axon")
            || of_device_is_compatible(np, "ibm,emac-440epx"))
                dev->features |= EMAC_FTR_HAS_AXON_STACR
                        | EMAC_FTR_STACR_OC_INVERT;
        if (of_device_is_compatible(np, "ibm,emac-440spe"))
                dev->features |= EMAC_FTR_STACR_OC_INVERT;

        /* Fixup some feature bits based on the device tree and verify
         * we have support for them compiled in
         */
        if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
                dev->features |= EMAC_FTR_HAS_TAH;
#else
                printk(KERN_ERR "%s: TAH support not enabled!\n",
                       np->full_name);
                return -ENXIO;
#endif
        }

        if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
                dev->features |= EMAC_FTR_HAS_ZMII;
#else
                printk(KERN_ERR "%s: ZMII support not enabled!\n",
                       np->full_name);
                return -ENXIO;
#endif
        }

        if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
                dev->features |= EMAC_FTR_HAS_RGMII;
#else
                printk(KERN_ERR "%s: RGMII support not enabled!\n",
                       np->full_name);
                return -ENXIO;
#endif
        }

        /* Read MAC-address */
        p = of_get_property(np, "local-mac-address", NULL);
        if (p == NULL) {
                printk(KERN_ERR "%s: Can't find local-mac-address property\n",
                       np->full_name);
                return -ENXIO;
        }
        memcpy(dev->ndev->dev_addr, p, 6);

        DBG(dev, "features     : 0x%08x / 0x%08x\n",
            dev->features, EMAC_FTRS_POSSIBLE);
        DBG(dev, "tx_fifo_size : %d (%d gige)\n",
            dev->tx_fifo_size, dev->tx_fifo_size_gige);
        DBG(dev, "rx_fifo_size : %d (%d gige)\n",
            dev->rx_fifo_size, dev->rx_fifo_size_gige);
        DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
        DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

        return 0;
}
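
/* An illustrative device-tree node carrying the properties parsed above
 * (labels and values are made-up examples; "reg" and "interrupts" are
 * consumed later in emac_probe() and omitted here):
 *
 *      EMAC0: ethernet@ef600800 {
 *              device_type = "network";
 *              compatible = "ibm,emac4", "ibm,emac";
 *              cell-index = <0>;
 *              local-mac-address = [ 00 04 ac 7a 21 5e ];
 *              mal-device = <&MAL0>;
 *              mal-tx-channel = <0>;
 *              mal-rx-channel = <0>;
 *              max-frame-size = <1500>;
 *              phy-mode = "rgmii";
 *              rgmii-device = <&RGMII0>;
 *              rgmii-channel = <0>;
 *      };
 */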

static int __devinit emac_probe(struct of_device *ofdev,
                                const struct of_device_id *match)
{
        struct net_device *ndev;
        struct emac_instance *dev;
        struct device_node *np = ofdev->node;
        struct device_node **blist = NULL;
        int err, i;

        /* Find ourselves in the bootlist if we are there */
        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
                if (emac_boot_list[i] == np)
                        blist = &emac_boot_list[i];

        /* Allocate our net_device structure */
        err = -ENOMEM;
        ndev = alloc_etherdev(sizeof(struct emac_instance));
        if (!ndev) {
                printk(KERN_ERR "%s: could not allocate ethernet device!\n",
                       np->full_name);
                goto err_gone;
        }
        dev = netdev_priv(ndev);
        dev->ndev = ndev;
        dev->ofdev = ofdev;
        dev->blist = blist;
        SET_NETDEV_DEV(ndev, &ofdev->dev);

        /* Initialize some embedded data structures */
        mutex_init(&dev->mdio_lock);
        mutex_init(&dev->link_lock);
        spin_lock_init(&dev->lock);
        INIT_WORK(&dev->reset_work, emac_reset_work);

        /* Init various config data based on device-tree */
        err = emac_init_config(dev);
        if (err != 0)
                goto err_free;

        /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
        dev->emac_irq = irq_of_parse_and_map(np, 0);
        dev->wol_irq = irq_of_parse_and_map(np, 1);
        if (dev->emac_irq == NO_IRQ) {
                printk(KERN_ERR "%s: Can't map main interrupt\n",
                       np->full_name);
                err = -ENODEV;
                goto err_free;
        }
        ndev->irq = dev->emac_irq;

        /* Map EMAC regs */
        if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
                printk(KERN_ERR "%s: Can't get registers address\n",
                       np->full_name);
                err = -ENXIO;
                goto err_irq_unmap;
        }
        /* TODO: request_mem_region */
        dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
        if (dev->emacp == NULL) {
                printk(KERN_ERR "%s: Can't map device registers!\n",
                       np->full_name);
                err = -ENOMEM;
                goto err_irq_unmap;
        }

        /* Wait for dependent devices */
        err = emac_wait_deps(dev);
        if (err) {
                printk(KERN_ERR
                       "%s: Timeout waiting for dependent devices\n",
                       np->full_name);
                /* display more info about what's missing? */
                goto err_reg_unmap;
        }
        dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
        if (dev->mdio_dev != NULL)
                dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

        /* Register with MAL */
        dev->commac.ops = &emac_commac_ops;
        dev->commac.dev = dev;
        dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
        dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
        err = mal_register_commac(dev->mal, &dev->commac);
        if (err) {
                printk(KERN_ERR "%s: failed to register with mal %s!\n",
                       np->full_name, dev->mal_dev->node->full_name);
                goto err_rel_deps;
        }
        dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
        dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

        /* Get pointers to BD rings */
        dev->tx_desc =
            dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
        dev->rx_desc =
            dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

        DBG(dev, "tx_desc %p" NL, dev->tx_desc);
        DBG(dev, "rx_desc %p" NL, dev->rx_desc);

        /* Clean rings */
        memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
        memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

        /* Attach to ZMII, if needed */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
            (err = zmii_attach(dev->zmii_dev, dev->zmii_port,
                               &dev->phy_mode)) != 0)
                goto err_unreg_commac;

        /* Attach to RGMII, if needed */
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
            (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port,
                                dev->phy_mode)) != 0)
                goto err_detach_zmii;

        /* Attach to TAH, if needed */
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
            (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
                goto err_detach_rgmii;

        /* Set some link defaults before we can find out real parameters */
        dev->phy.speed = SPEED_100;
        dev->phy.duplex = DUPLEX_FULL;
        dev->phy.autoneg = AUTONEG_DISABLE;
        dev->phy.pause = dev->phy.asym_pause = 0;
        dev->stop_timeout = STOP_TIMEOUT_100;
        INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

        /* Find PHY if any */
        err = emac_init_phy(dev);
        if (err != 0)
                goto err_detach_tah;

        /* Fill in the driver function table */
        ndev->open = &emac_open;
#ifdef CONFIG_IBM_NEW_EMAC_TAH
        if (dev->tah_dev) {
                ndev->hard_start_xmit = &emac_start_xmit_sg;
                ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        } else
#endif
                ndev->hard_start_xmit = &emac_start_xmit;
        ndev->tx_timeout = &emac_tx_timeout;
        ndev->watchdog_timeo = 5 * HZ;
        ndev->stop = &emac_close;
        ndev->get_stats = &emac_stats;
        ndev->set_multicast_list = &emac_set_multicast_list;
        ndev->do_ioctl = &emac_ioctl;
        if (emac_phy_supports_gige(dev->phy_mode)) {
                ndev->change_mtu = &emac_change_mtu;
                dev->commac.ops = &emac_commac_sg_ops;
        }
        SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

        netif_carrier_off(ndev);
        netif_stop_queue(ndev);

        err = register_netdev(ndev);
        if (err) {
                printk(KERN_ERR "%s: failed to register net device (%d)!\n",
                       np->full_name, err);
                goto err_detach_tah;
        }

        /* Set our drvdata last as we don't want them visible until we are
         * fully initialized
         */
        wmb();
        dev_set_drvdata(&ofdev->dev, dev);

        /* There's a new kid in town! Let's tell everybody */
        wake_up_all(&emac_probe_wait);

        printk(KERN_INFO
               "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
               ndev->name, dev->cell_index, np->full_name,
               ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
               ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

        if (dev->phy.address >= 0)
                printk("%s: found %s PHY (0x%02x)\n", ndev->name,
                       dev->phy.def->name, dev->phy.address);

        emac_dbg_register(dev);

        /* Life is good */
        return 0;

        /* I have a bad feeling about this ... */

 err_detach_tah:
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
        mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
        emac_put_deps(dev);
 err_reg_unmap:
        iounmap(dev->emacp);
 err_irq_unmap:
        if (dev->wol_irq != NO_IRQ)
                irq_dispose_mapping(dev->wol_irq);
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
 err_free:
        kfree(ndev);
 err_gone:
        /* if we were on the bootlist, remove us as we won't show up and
         * wake up all waiters to notify them in case they were waiting
         * on us
         */
        if (blist) {
                *blist = NULL;
                wake_up_all(&emac_probe_wait);
        }
        return err;
}

static int __devexit emac_remove(struct of_device *ofdev)
{
        struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

        DBG(dev, "remove" NL);

        dev_set_drvdata(&ofdev->dev, NULL);

        unregister_netdev(dev->ndev);

        flush_scheduled_work();

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_detach(dev->tah_dev, dev->tah_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_detach(dev->zmii_dev, dev->zmii_port);

        mal_unregister_commac(dev->mal, &dev->commac);
        emac_put_deps(dev);

        emac_dbg_unregister(dev);
        iounmap(dev->emacp);

        if (dev->wol_irq != NO_IRQ)
                irq_dispose_mapping(dev->wol_irq);
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);

        kfree(dev->ndev);

        return 0;
}

/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
        {
                .type = "network",
                .compatible = "ibm,emac",
        },
        {
                .type = "network",
                .compatible = "ibm,emac4",
        },
        {},
};

static struct of_platform_driver emac_driver = {
        .name = "emac",
        .match_table = emac_match,

        .probe = emac_probe,
        .remove = emac_remove,
};

static void __init emac_make_bootlist(void)
{
        struct device_node *np = NULL;
        int j, max, i = 0, k;
        int cell_indices[EMAC_BOOT_LIST_SIZE];

        /* Collect EMACs */
        while ((np = of_find_all_nodes(np)) != NULL) {
                const u32 *idx;

                if (of_match_node(emac_match, np) == NULL)
                        continue;
                if (of_get_property(np, "unused", NULL))
                        continue;
                idx = of_get_property(np, "cell-index", NULL);
                if (idx == NULL)
                        continue;
                cell_indices[i] = *idx;
                emac_boot_list[i++] = of_node_get(np);
                if (i >= EMAC_BOOT_LIST_SIZE) {
                        of_node_put(np);
                        break;
                }
        }
        max = i;

        /* Bubble sort them (doh, what a creative algorithm :-) */
        for (i = 0; max > 1 && (i < (max - 1)); i++)
                for (j = i; j < max; j++) {
                        if (cell_indices[i] > cell_indices[j]) {
                                np = emac_boot_list[i];
                                emac_boot_list[i] = emac_boot_list[j];
                                emac_boot_list[j] = np;
                                k = cell_indices[i];
                                cell_indices[i] = cell_indices[j];
                                cell_indices[j] = k;
                        }
                }
}
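
/* Example: EMACs found in device-tree order with cell-index 2, 0, 1 end
 * up in emac_boot_list ordered 0, 1, 2. Since each listed EMAC waits on
 * the one before it via EMAC_DEP_PREV_IDX in emac_wait_deps(), bring-up
 * is effectively serialized in cell-index order.
 */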

static int __init emac_init(void)
{
        int rc;

        printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

        /* Init debug stuff */
        emac_init_debug();

        /* Build EMAC boot list */
        emac_make_bootlist();

        /* Init submodules */
        rc = mal_init();
        if (rc)
                goto err;
        rc = zmii_init();
        if (rc)
                goto err_mal;
        rc = rgmii_init();
        if (rc)
                goto err_zmii;
        rc = tah_init();
        if (rc)
                goto err_rgmii;
        rc = of_register_platform_driver(&emac_driver);
        if (rc)
                goto err_tah;

        return 0;

 err_tah:
        tah_exit();
 err_rgmii:
        rgmii_exit();
 err_zmii:
        zmii_exit();
 err_mal:
        mal_exit();
 err:
        return rc;
}

static void __exit emac_exit(void)
{
        int i;

        of_unregister_platform_driver(&emac_driver);

        tah_exit();
        rgmii_exit();
        zmii_exit();
        mal_exit();
        emac_fini_debug();

        /* Destroy EMAC boot list */
        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
                if (emac_boot_list[i])
                        of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);