drivers/net/au1000_eth.c
1/*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001,2002,2003 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Author: MontaVista Software, Inc.
13 * ppopov@mvista.com or source@mvista.com
14 *
15 * ########################################################################
16 *
17 * This program is free software; you can distribute it and/or modify it
18 * under the terms of the GNU General Public License (Version 2) as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 * for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
29 *
30 * ########################################################################
31 *
32 *
33 */
34
 35#include <linux/config.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39#include <linux/string.h>
40#include <linux/timer.h>
41#include <linux/errno.h>
42#include <linux/in.h>
43#include <linux/ioport.h>
44#include <linux/bitops.h>
45#include <linux/slab.h>
46#include <linux/interrupt.h>
47#include <linux/pci.h>
48#include <linux/init.h>
49#include <linux/netdevice.h>
50#include <linux/etherdevice.h>
51#include <linux/ethtool.h>
52#include <linux/mii.h>
53#include <linux/skbuff.h>
54#include <linux/delay.h>
55#include <asm/mipsregs.h>
56#include <asm/irq.h>
57#include <asm/io.h>
58#include <asm/processor.h>
59
60#include <asm/mach-au1x00/au1000.h>
61#include <asm/cpu.h>
62#include "au1000_eth.h"
63
64#ifdef AU1000_ETH_DEBUG
65static int au1000_debug = 5;
66#else
67static int au1000_debug = 3;
68#endif
69
70#define DRV_NAME "au1000eth"
71#define DRV_VERSION "1.5"
72#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
73#define DRV_DESC "Au1xxx on-chip Ethernet driver"
74
75MODULE_AUTHOR(DRV_AUTHOR);
76MODULE_DESCRIPTION(DRV_DESC);
77MODULE_LICENSE("GPL");
78
79// prototypes
80static void hard_stop(struct net_device *);
81static void enable_rx_tx(struct net_device *dev);
82static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
83static int au1000_init(struct net_device *);
84static int au1000_open(struct net_device *);
85static int au1000_close(struct net_device *);
86static int au1000_tx(struct sk_buff *, struct net_device *);
87static int au1000_rx(struct net_device *);
88static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
89static void au1000_tx_timeout(struct net_device *);
90static int au1000_set_config(struct net_device *dev, struct ifmap *map);
91static void set_rx_mode(struct net_device *);
92static struct net_device_stats *au1000_get_stats(struct net_device *);
93static void au1000_timer(unsigned long);
94static int au1000_ioctl(struct net_device *, struct ifreq *, int);
95static int mdio_read(struct net_device *, int, int);
96static void mdio_write(struct net_device *, int, int, u16);
97static void dump_mii(struct net_device *dev, int phy_id);
98
99// externs
100extern void ack_rise_edge_irq(unsigned int);
101extern int get_ethernet_addr(char *ethernet_addr);
102extern void str2eaddr(unsigned char *ea, unsigned char *str);
103extern char * __init prom_getcmdline(void);
104
105/*
106 * Theory of operation
107 *
108 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
109 * There are four receive and four transmit descriptors. These
110 * descriptors are not in memory; rather, they are just a set of
111 * hardware registers.
112 *
113 * Since the Au1000 has a coherent data cache, the receive and
114 * transmit buffers are allocated from the KSEG0 segment. The
115 * hardware registers, however, are still mapped at KSEG1 to
 116 * make sure there are no out-of-order writes and that all writes
117 * complete immediately.
118 */
119
120/* These addresses are only used if yamon doesn't tell us what
121 * the mac address is, and the mac address is not passed on the
122 * command line.
123 */
124static unsigned char au1000_mac_addr[6] __devinitdata = {
125 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
126};
127
128#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
129#define RUN_AT(x) (jiffies + (x))
130
131// For reading/writing 32-bit words from/to DMA memory
132#define cpu_to_dma32 cpu_to_be32
133#define dma32_to_cpu be32_to_cpu
134
135struct au1000_private *au_macs[NUM_ETH_INTERFACES];
136
137/* FIXME
138 * All of the PHY code really should be detached from the MAC
139 * code.
140 */
141
142/* Default advertise */
143#define GENMII_DEFAULT_ADVERTISE \
144 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
145 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
146 ADVERTISED_Autoneg
147
148#define GENMII_DEFAULT_FEATURES \
149 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
150 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
151 SUPPORTED_Autoneg
152
153int bcm_5201_init(struct net_device *dev, int phy_addr)
154{
155 s16 data;
156
157 /* Stop auto-negotiation */
158 data = mdio_read(dev, phy_addr, MII_CONTROL);
159 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
160
161 /* Set advertisement to 10/100 and Half/Full duplex
162 * (full capabilities) */
163 data = mdio_read(dev, phy_addr, MII_ANADV);
164 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
165 mdio_write(dev, phy_addr, MII_ANADV, data);
166
167 /* Restart auto-negotiation */
168 data = mdio_read(dev, phy_addr, MII_CONTROL);
169 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
170 mdio_write(dev, phy_addr, MII_CONTROL, data);
171
172 if (au1000_debug > 4)
173 dump_mii(dev, phy_addr);
174 return 0;
175}
176
177int bcm_5201_reset(struct net_device *dev, int phy_addr)
178{
179 s16 mii_control, timeout;
180
181 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
182 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
183 mdelay(1);
184 for (timeout = 100; timeout > 0; --timeout) {
185 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
186 if ((mii_control & MII_CNTL_RESET) == 0)
187 break;
188 mdelay(1);
189 }
190 if (mii_control & MII_CNTL_RESET) {
191 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
192 return -1;
193 }
194 return 0;
195}
196
197int
198bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
199{
200 u16 mii_data;
201 struct au1000_private *aup;
202
203 if (!dev) {
204 printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
205 return -1;
206 }
207 aup = (struct au1000_private *) dev->priv;
208
209 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
210 if (mii_data & MII_STAT_LINK) {
211 *link = 1;
212 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
213 if (mii_data & MII_AUX_100) {
214 if (mii_data & MII_AUX_FDX) {
215 *speed = IF_PORT_100BASEFX;
216 dev->if_port = IF_PORT_100BASEFX;
217 }
218 else {
219 *speed = IF_PORT_100BASETX;
220 dev->if_port = IF_PORT_100BASETX;
221 }
222 }
223 else {
224 *speed = IF_PORT_10BASET;
225 dev->if_port = IF_PORT_10BASET;
226 }
227
228 }
229 else {
230 *link = 0;
231 *speed = 0;
232 dev->if_port = IF_PORT_UNKNOWN;
233 }
234 return 0;
235}
236
237int lsi_80227_init(struct net_device *dev, int phy_addr)
238{
239 if (au1000_debug > 4)
240 printk("lsi_80227_init\n");
241
242 /* restart auto-negotiation */
243 mdio_write(dev, phy_addr, MII_CONTROL,
244 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
245 mdelay(1);
246
247 /* set up LEDs to correct display */
248#ifdef CONFIG_MIPS_MTX1
249 mdio_write(dev, phy_addr, 17, 0xff80);
250#else
251 mdio_write(dev, phy_addr, 17, 0xffc0);
252#endif
253
254 if (au1000_debug > 4)
255 dump_mii(dev, phy_addr);
256 return 0;
257}
258
259int lsi_80227_reset(struct net_device *dev, int phy_addr)
260{
261 s16 mii_control, timeout;
262
263 if (au1000_debug > 4) {
264 printk("lsi_80227_reset\n");
265 dump_mii(dev, phy_addr);
266 }
267
268 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
269 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
270 mdelay(1);
271 for (timeout = 100; timeout > 0; --timeout) {
272 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
273 if ((mii_control & MII_CNTL_RESET) == 0)
274 break;
275 mdelay(1);
276 }
277 if (mii_control & MII_CNTL_RESET) {
278 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
279 return -1;
280 }
281 return 0;
282}
283
284int
285lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
286{
287 u16 mii_data;
288 struct au1000_private *aup;
289
290 if (!dev) {
291 printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
292 return -1;
293 }
294 aup = (struct au1000_private *) dev->priv;
295
296 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
297 if (mii_data & MII_STAT_LINK) {
298 *link = 1;
299 mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
300 if (mii_data & MII_LSI_PHY_STAT_SPD) {
301 if (mii_data & MII_LSI_PHY_STAT_FDX) {
302 *speed = IF_PORT_100BASEFX;
303 dev->if_port = IF_PORT_100BASEFX;
304 }
305 else {
306 *speed = IF_PORT_100BASETX;
307 dev->if_port = IF_PORT_100BASETX;
308 }
309 }
310 else {
311 *speed = IF_PORT_10BASET;
312 dev->if_port = IF_PORT_10BASET;
313 }
314
315 }
316 else {
317 *link = 0;
318 *speed = 0;
319 dev->if_port = IF_PORT_UNKNOWN;
320 }
321 return 0;
322}
323
324int am79c901_init(struct net_device *dev, int phy_addr)
325{
326 printk("am79c901_init\n");
327 return 0;
328}
329
330int am79c901_reset(struct net_device *dev, int phy_addr)
331{
332 printk("am79c901_reset\n");
333 return 0;
334}
335
336int
337am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
338{
339 return 0;
340}
341
342int am79c874_init(struct net_device *dev, int phy_addr)
343{
344 s16 data;
345
 346 /* the 79c874 has quite similar bit assignments to the BCM5201 */
347 if (au1000_debug > 4)
 348 printk("am79c874_init\n");
349
350 /* Stop auto-negotiation */
351 data = mdio_read(dev, phy_addr, MII_CONTROL);
352 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
353
354 /* Set advertisement to 10/100 and Half/Full duplex
355 * (full capabilities) */
356 data = mdio_read(dev, phy_addr, MII_ANADV);
357 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
358 mdio_write(dev, phy_addr, MII_ANADV, data);
359
360 /* Restart auto-negotiation */
361 data = mdio_read(dev, phy_addr, MII_CONTROL);
362 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
363
364 mdio_write(dev, phy_addr, MII_CONTROL, data);
365
366 if (au1000_debug > 4) dump_mii(dev, phy_addr);
367 return 0;
368}
369
370int am79c874_reset(struct net_device *dev, int phy_addr)
371{
372 s16 mii_control, timeout;
373
374 if (au1000_debug > 4)
375 printk("am79c874_reset\n");
376
377 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
378 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
379 mdelay(1);
380 for (timeout = 100; timeout > 0; --timeout) {
381 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
382 if ((mii_control & MII_CNTL_RESET) == 0)
383 break;
384 mdelay(1);
385 }
386 if (mii_control & MII_CNTL_RESET) {
387 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
388 return -1;
389 }
390 return 0;
391}
392
393int
394am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
395{
396 u16 mii_data;
397 struct au1000_private *aup;
398
399 // printk("am79c874_status\n");
400 if (!dev) {
401 printk(KERN_ERR "am79c874_status error: NULL dev\n");
402 return -1;
403 }
404
405 aup = (struct au1000_private *) dev->priv;
406 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
407
408 if (mii_data & MII_STAT_LINK) {
409 *link = 1;
410 mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
411 if (mii_data & MII_AMD_PHY_STAT_SPD) {
412 if (mii_data & MII_AMD_PHY_STAT_FDX) {
413 *speed = IF_PORT_100BASEFX;
414 dev->if_port = IF_PORT_100BASEFX;
415 }
416 else {
417 *speed = IF_PORT_100BASETX;
418 dev->if_port = IF_PORT_100BASETX;
419 }
420 }
421 else {
422 *speed = IF_PORT_10BASET;
423 dev->if_port = IF_PORT_10BASET;
424 }
425
426 }
427 else {
428 *link = 0;
429 *speed = 0;
430 dev->if_port = IF_PORT_UNKNOWN;
431 }
432 return 0;
433}
434
435int lxt971a_init(struct net_device *dev, int phy_addr)
436{
437 if (au1000_debug > 4)
438 printk("lxt971a_init\n");
439
440 /* restart auto-negotiation */
441 mdio_write(dev, phy_addr, MII_CONTROL,
442 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
443
444 /* set up LEDs to correct display */
445 mdio_write(dev, phy_addr, 20, 0x0422);
446
447 if (au1000_debug > 4)
448 dump_mii(dev, phy_addr);
449 return 0;
450}
451
452int lxt971a_reset(struct net_device *dev, int phy_addr)
453{
454 s16 mii_control, timeout;
455
456 if (au1000_debug > 4) {
457 printk("lxt971a_reset\n");
458 dump_mii(dev, phy_addr);
459 }
460
461 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
462 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
463 mdelay(1);
464 for (timeout = 100; timeout > 0; --timeout) {
465 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
466 if ((mii_control & MII_CNTL_RESET) == 0)
467 break;
468 mdelay(1);
469 }
470 if (mii_control & MII_CNTL_RESET) {
471 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
472 return -1;
473 }
474 return 0;
475}
476
477int
478lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
479{
480 u16 mii_data;
481 struct au1000_private *aup;
482
483 if (!dev) {
484 printk(KERN_ERR "lxt971a_status error: NULL dev\n");
485 return -1;
486 }
487 aup = (struct au1000_private *) dev->priv;
488
489 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
490 if (mii_data & MII_STAT_LINK) {
491 *link = 1;
492 mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
493 if (mii_data & MII_INTEL_PHY_STAT_SPD) {
494 if (mii_data & MII_INTEL_PHY_STAT_FDX) {
495 *speed = IF_PORT_100BASEFX;
496 dev->if_port = IF_PORT_100BASEFX;
497 }
498 else {
499 *speed = IF_PORT_100BASETX;
500 dev->if_port = IF_PORT_100BASETX;
501 }
502 }
503 else {
504 *speed = IF_PORT_10BASET;
505 dev->if_port = IF_PORT_10BASET;
506 }
507
508 }
509 else {
510 *link = 0;
511 *speed = 0;
512 dev->if_port = IF_PORT_UNKNOWN;
513 }
514 return 0;
515}
516
517int ks8995m_init(struct net_device *dev, int phy_addr)
518{
519 s16 data;
520
521// printk("ks8995m_init\n");
522 /* Stop auto-negotiation */
523 data = mdio_read(dev, phy_addr, MII_CONTROL);
524 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
525
526 /* Set advertisement to 10/100 and Half/Full duplex
527 * (full capabilities) */
528 data = mdio_read(dev, phy_addr, MII_ANADV);
529 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
530 mdio_write(dev, phy_addr, MII_ANADV, data);
531
532 /* Restart auto-negotiation */
533 data = mdio_read(dev, phy_addr, MII_CONTROL);
534 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
535 mdio_write(dev, phy_addr, MII_CONTROL, data);
536
537 if (au1000_debug > 4) dump_mii(dev, phy_addr);
538
539 return 0;
540}
541
542int ks8995m_reset(struct net_device *dev, int phy_addr)
543{
544 s16 mii_control, timeout;
545
546// printk("ks8995m_reset\n");
547 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
548 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
549 mdelay(1);
550 for (timeout = 100; timeout > 0; --timeout) {
551 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
552 if ((mii_control & MII_CNTL_RESET) == 0)
553 break;
554 mdelay(1);
555 }
556 if (mii_control & MII_CNTL_RESET) {
557 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
558 return -1;
559 }
560 return 0;
561}
562
563int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
564{
565 u16 mii_data;
566 struct au1000_private *aup;
567
568 if (!dev) {
569 printk(KERN_ERR "ks8995m_status error: NULL dev\n");
570 return -1;
571 }
572 aup = (struct au1000_private *) dev->priv;
573
574 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
575 if (mii_data & MII_STAT_LINK) {
576 *link = 1;
577 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
578 if (mii_data & MII_AUX_100) {
579 if (mii_data & MII_AUX_FDX) {
580 *speed = IF_PORT_100BASEFX;
581 dev->if_port = IF_PORT_100BASEFX;
582 }
583 else {
584 *speed = IF_PORT_100BASETX;
585 dev->if_port = IF_PORT_100BASETX;
586 }
587 }
588 else {
589 *speed = IF_PORT_10BASET;
590 dev->if_port = IF_PORT_10BASET;
591 }
592
593 }
594 else {
595 *link = 0;
596 *speed = 0;
597 dev->if_port = IF_PORT_UNKNOWN;
598 }
599 return 0;
600}
601
602int
603smsc_83C185_init (struct net_device *dev, int phy_addr)
604{
605 s16 data;
606
607 if (au1000_debug > 4)
608 printk("smsc_83C185_init\n");
609
610 /* Stop auto-negotiation */
611 data = mdio_read(dev, phy_addr, MII_CONTROL);
612 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
613
614 /* Set advertisement to 10/100 and Half/Full duplex
615 * (full capabilities) */
616 data = mdio_read(dev, phy_addr, MII_ANADV);
617 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
618 mdio_write(dev, phy_addr, MII_ANADV, data);
619
620 /* Restart auto-negotiation */
621 data = mdio_read(dev, phy_addr, MII_CONTROL);
622 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
623
624 mdio_write(dev, phy_addr, MII_CONTROL, data);
625
626 if (au1000_debug > 4) dump_mii(dev, phy_addr);
627 return 0;
628}
629
630int
631smsc_83C185_reset (struct net_device *dev, int phy_addr)
632{
633 s16 mii_control, timeout;
634
635 if (au1000_debug > 4)
636 printk("smsc_83C185_reset\n");
637
638 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
639 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
640 mdelay(1);
641 for (timeout = 100; timeout > 0; --timeout) {
642 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
643 if ((mii_control & MII_CNTL_RESET) == 0)
644 break;
645 mdelay(1);
646 }
647 if (mii_control & MII_CNTL_RESET) {
648 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
649 return -1;
650 }
651 return 0;
652}
653
654int
655smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
656{
657 u16 mii_data;
658 struct au1000_private *aup;
659
660 if (!dev) {
661 printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
662 return -1;
663 }
664
665 aup = (struct au1000_private *) dev->priv;
666 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
667
668 if (mii_data & MII_STAT_LINK) {
669 *link = 1;
670 mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
671 if (mii_data & (1<<3)) {
672 if (mii_data & (1<<4)) {
673 *speed = IF_PORT_100BASEFX;
674 dev->if_port = IF_PORT_100BASEFX;
675 }
676 else {
677 *speed = IF_PORT_100BASETX;
678 dev->if_port = IF_PORT_100BASETX;
679 }
680 }
681 else {
682 *speed = IF_PORT_10BASET;
683 dev->if_port = IF_PORT_10BASET;
684 }
685 }
686 else {
687 *link = 0;
688 *speed = 0;
689 dev->if_port = IF_PORT_UNKNOWN;
690 }
691 return 0;
692}
693
694
695#ifdef CONFIG_MIPS_BOSPORUS
696int stub_init(struct net_device *dev, int phy_addr)
697{
698 //printk("PHY stub_init\n");
699 return 0;
700}
701
702int stub_reset(struct net_device *dev, int phy_addr)
703{
704 //printk("PHY stub_reset\n");
705 return 0;
706}
707
708int
709stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
710{
711 //printk("PHY stub_status\n");
712 *link = 1;
713 /* hmmm, revisit */
714 *speed = IF_PORT_100BASEFX;
715 dev->if_port = IF_PORT_100BASEFX;
716 return 0;
717}
718#endif
719
720struct phy_ops bcm_5201_ops = {
721 bcm_5201_init,
722 bcm_5201_reset,
723 bcm_5201_status,
724};
725
726struct phy_ops am79c874_ops = {
727 am79c874_init,
728 am79c874_reset,
729 am79c874_status,
730};
731
732struct phy_ops am79c901_ops = {
733 am79c901_init,
734 am79c901_reset,
735 am79c901_status,
736};
737
738struct phy_ops lsi_80227_ops = {
739 lsi_80227_init,
740 lsi_80227_reset,
741 lsi_80227_status,
742};
743
744struct phy_ops lxt971a_ops = {
745 lxt971a_init,
746 lxt971a_reset,
747 lxt971a_status,
748};
749
750struct phy_ops ks8995m_ops = {
751 ks8995m_init,
752 ks8995m_reset,
753 ks8995m_status,
754};
755
756struct phy_ops smsc_83C185_ops = {
757 smsc_83C185_init,
758 smsc_83C185_reset,
759 smsc_83C185_status,
760};
761
762#ifdef CONFIG_MIPS_BOSPORUS
763struct phy_ops stub_ops = {
764 stub_init,
765 stub_reset,
766 stub_status,
767};
768#endif
769
770static struct mii_chip_info {
771 const char * name;
772 u16 phy_id0;
773 u16 phy_id1;
774 struct phy_ops *phy_ops;
775 int dual_phy;
776} mii_chip_table[] = {
777 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
778 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
779 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
 780 {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops, 0},
781 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
782 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
783 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
784 {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
785 {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
786 {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
787#ifdef CONFIG_MIPS_BOSPORUS
788 {"Stub", 0x1234, 0x5678, &stub_ops },
789#endif
790 {0,},
791};
792
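/*
 * MDIO access helpers. A transaction is started by writing the PHY
 * address, register number and read/write opcode into the MAC's
 * mii_control register; MAC_MII_BUSY stays set until the serial
 * transfer finishes, so we busy-wait (up to ~20 ms) both before
 * starting a transaction and, for reads, before picking the result
 * up from mii_data.
 */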
793static int mdio_read(struct net_device *dev, int phy_id, int reg)
794{
795 struct au1000_private *aup = (struct au1000_private *) dev->priv;
796 volatile u32 *mii_control_reg;
797 volatile u32 *mii_data_reg;
798 u32 timedout = 20;
799 u32 mii_control;
800
801 #ifdef CONFIG_BCM5222_DUAL_PHY
 802 /* The first time we probe, it's for the mac0 phy.
 803 * Since we haven't yet determined that we have a dual phy,
 804 * aup->mii->mii_control_reg won't be set up and we'll
 805 * fall through to the else branch.
 806 * By the time we probe for the mac1 phy, mii_control_reg
 807 * will be set up to point at the mac0 phy control register,
 808 * since both phys are controlled through mac0.
 809 */
810 if (aup->mii && aup->mii->mii_control_reg) {
811 mii_control_reg = aup->mii->mii_control_reg;
812 mii_data_reg = aup->mii->mii_data_reg;
813 }
814 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
815 /* assume both phys are controlled through mac0 */
816 mii_control_reg = au_macs[0]->mii->mii_control_reg;
817 mii_data_reg = au_macs[0]->mii->mii_data_reg;
818 }
819 else
820 #endif
821 {
822 /* default control and data reg addresses */
823 mii_control_reg = &aup->mac->mii_control;
824 mii_data_reg = &aup->mac->mii_data;
825 }
826
827 while (*mii_control_reg & MAC_MII_BUSY) {
828 mdelay(1);
829 if (--timedout == 0) {
830 printk(KERN_ERR "%s: read_MII busy timeout!!\n",
831 dev->name);
832 return -1;
833 }
834 }
835
836 mii_control = MAC_SET_MII_SELECT_REG(reg) |
837 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
838
839 *mii_control_reg = mii_control;
840
841 timedout = 20;
842 while (*mii_control_reg & MAC_MII_BUSY) {
843 mdelay(1);
844 if (--timedout == 0) {
845 printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
846 dev->name);
847 return -1;
848 }
849 }
850 return (int)*mii_data_reg;
851}
852
853static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
854{
855 struct au1000_private *aup = (struct au1000_private *) dev->priv;
856 volatile u32 *mii_control_reg;
857 volatile u32 *mii_data_reg;
858 u32 timedout = 20;
859 u32 mii_control;
860
861 #ifdef CONFIG_BCM5222_DUAL_PHY
862 if (aup->mii && aup->mii->mii_control_reg) {
863 mii_control_reg = aup->mii->mii_control_reg;
864 mii_data_reg = aup->mii->mii_data_reg;
865 }
866 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
867 /* assume both phys are controlled through mac0 */
868 mii_control_reg = au_macs[0]->mii->mii_control_reg;
869 mii_data_reg = au_macs[0]->mii->mii_data_reg;
870 }
871 else
872 #endif
873 {
874 /* default control and data reg addresses */
875 mii_control_reg = &aup->mac->mii_control;
876 mii_data_reg = &aup->mac->mii_data;
877 }
878
879 while (*mii_control_reg & MAC_MII_BUSY) {
880 mdelay(1);
881 if (--timedout == 0) {
882 printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
883 dev->name);
884 return;
885 }
886 }
887
888 mii_control = MAC_SET_MII_SELECT_REG(reg) |
889 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
890
891 *mii_data_reg = value;
892 *mii_control_reg = mii_control;
893}
894
895
896static void dump_mii(struct net_device *dev, int phy_id)
897{
898 int i, val;
899
900 for (i = 0; i < 7; i++) {
901 if ((val = mdio_read(dev, phy_id, i)) >= 0)
902 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
903 }
904 for (i = 16; i < 25; i++) {
905 if ((val = mdio_read(dev, phy_id, i)) >= 0)
906 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
907 }
908}
909
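/*
 * Walk the 32 possible MII addresses, read the PHY ID registers of
 * each one that responds, and match them against mii_chip_table to
 * select the phy_ops used for init/reset/status.
 */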
910static int mii_probe (struct net_device * dev)
911{
912 struct au1000_private *aup = (struct au1000_private *) dev->priv;
913 int phy_addr;
914#ifdef CONFIG_MIPS_BOSPORUS
915 int phy_found=0;
916#endif
917
 918 /* search each of the 32 possible mii phy addresses */
919 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
920 u16 mii_status;
921 u16 phy_id0, phy_id1;
922 int i;
923
924 #ifdef CONFIG_BCM5222_DUAL_PHY
925 /* Mask the already found phy, try next one */
926 if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
927 if (au_macs[0]->phy_addr == phy_addr)
928 continue;
929 }
930 #endif
931
932 mii_status = mdio_read(dev, phy_addr, MII_STATUS);
933 if (mii_status == 0xffff || mii_status == 0x0000)
 934 /* the mii is not accessible, try next one */
935 continue;
936
937 phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
938 phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
939
940 /* search our mii table for the current mii */
941 for (i = 0; mii_chip_table[i].phy_id1; i++) {
942 if (phy_id0 == mii_chip_table[i].phy_id0 &&
943 phy_id1 == mii_chip_table[i].phy_id1) {
944 struct mii_phy * mii_phy = aup->mii;
945
946 printk(KERN_INFO "%s: %s at phy address %d\n",
947 dev->name, mii_chip_table[i].name,
948 phy_addr);
949#ifdef CONFIG_MIPS_BOSPORUS
950 phy_found = 1;
951#endif
952 mii_phy->chip_info = mii_chip_table+i;
953 aup->phy_addr = phy_addr;
954 aup->want_autoneg = 1;
955 aup->phy_ops = mii_chip_table[i].phy_ops;
956 aup->phy_ops->phy_init(dev,phy_addr);
957
958 // Check for dual-phy and then store required
959 // values and set indicators. We need to do
960 // this now since mdio_{read,write} need the
961 // control and data register addresses.
962 #ifdef CONFIG_BCM5222_DUAL_PHY
963 if ( mii_chip_table[i].dual_phy) {
964
965 /* assume both phys are controlled
966 * through MAC0. Board specific? */
967
968 /* sanity check */
969 if (!au_macs[0] || !au_macs[0]->mii)
970 return -1;
971 aup->mii->mii_control_reg = (u32 *)
972 &au_macs[0]->mac->mii_control;
973 aup->mii->mii_data_reg = (u32 *)
974 &au_macs[0]->mac->mii_data;
975 }
976 #endif
977 goto found;
978 }
979 }
980 }
981found:
982
983#ifdef CONFIG_MIPS_BOSPORUS
 984 /* This is a workaround for the Micrel/Kendin 5-port switch.
985 The second MAC doesn't see a PHY connected... so we need to
986 trick it into thinking we have one.
987
988 If this kernel is run on another Au1500 development board
989 the stub will be found as well as the actual PHY. However,
990 the last found PHY will be used... usually at Addr 31 (Db1500).
991 */
992 if ( (!phy_found) )
993 {
994 u16 phy_id0, phy_id1;
995 int i;
996
997 phy_id0 = 0x1234;
998 phy_id1 = 0x5678;
999
1000 /* search our mii table for the current mii */
1001 for (i = 0; mii_chip_table[i].phy_id1; i++) {
1002 if (phy_id0 == mii_chip_table[i].phy_id0 &&
1003 phy_id1 == mii_chip_table[i].phy_id1) {
1004 struct mii_phy * mii_phy;
1005
1006 printk(KERN_INFO "%s: %s at phy address %d\n",
1007 dev->name, mii_chip_table[i].name,
1008 phy_addr);
1009 mii_phy = kmalloc(sizeof(struct mii_phy),
1010 GFP_KERNEL);
1011 if (mii_phy) {
1012 mii_phy->chip_info = mii_chip_table+i;
1013 aup->phy_addr = phy_addr;
1014 mii_phy->next = aup->mii;
1015 aup->phy_ops =
1016 mii_chip_table[i].phy_ops;
1017 aup->mii = mii_phy;
1018 aup->phy_ops->phy_init(dev,phy_addr);
1019 } else {
1020 printk(KERN_ERR "%s: out of memory\n",
1021 dev->name);
1022 return -1;
1023 }
1024 mii_phy->chip_info = mii_chip_table+i;
1025 aup->phy_addr = phy_addr;
1026 aup->phy_ops = mii_chip_table[i].phy_ops;
1027 aup->phy_ops->phy_init(dev,phy_addr);
1028 break;
1029 }
1030 }
1031 }
1032 if (aup->mac_id == 0) {
1033 /* the Bosporus phy responds to addresses 0-5 but
1034 * 5 is the correct one.
1035 */
1036 aup->phy_addr = 5;
1037 }
1038#endif
1039
1040 if (aup->mii->chip_info == NULL) {
 1041 printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
1042 dev->name);
1043 return -1;
1044 }
1045
1046 printk(KERN_INFO "%s: Using %s as default\n",
1047 dev->name, aup->mii->chip_info->name);
1048
1049 return 0;
1050}
1051
1052
1053/*
1054 * Buffer allocation/deallocation routines. The buffer descriptor returned
1055 * has the virtual and dma address of a buffer suitable for
 1056 * both receive and transmit operations.
1057 */
1058static db_dest_t *GetFreeDB(struct au1000_private *aup)
1059{
1060 db_dest_t *pDB;
1061 pDB = aup->pDBfree;
1062
1063 if (pDB) {
1064 aup->pDBfree = pDB->pnext;
1065 }
1066 return pDB;
1067}
1068
1069void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
1070{
1071 db_dest_t *pDBfree = aup->pDBfree;
1072 if (pDBfree)
1073 pDBfree->pnext = pDB;
1074 aup->pDBfree = pDB;
1075}
1076
1077static void enable_rx_tx(struct net_device *dev)
1078{
1079 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1080
1081 if (au1000_debug > 4)
1082 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
1083
1084 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
1085 au_sync_delay(10);
1086}
1087
1088static void hard_stop(struct net_device *dev)
1089{
1090 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1091
1092 if (au1000_debug > 4)
1093 printk(KERN_INFO "%s: hard stop\n", dev->name);
1094
1095 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
1096 au_sync_delay(10);
1097}
1098
1099
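/*
 * Put the MAC back into its power-up state: stop rx/tx, briefly
 * enable the MAC clock and then drop the block back into reset, and
 * clear the control bits of every rx/tx DMA entry. In the BCM5222
 * dual-PHY case MAC0 is left out of reset, since ETH1's PHY is
 * reached through it.
 */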
1100static void reset_mac(struct net_device *dev)
1101{
1102 int i;
1103 u32 flags;
1104 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1105
1106 if (au1000_debug > 4)
1107 printk(KERN_INFO "%s: reset mac, aup %x\n",
1108 dev->name, (unsigned)aup);
1109
1110 spin_lock_irqsave(&aup->lock, flags);
 1111 if (aup->timer.function == &au1000_timer) { /* timer initialized? */
1112 del_timer(&aup->timer);
1113 }
1114
1115 hard_stop(dev);
1116 #ifdef CONFIG_BCM5222_DUAL_PHY
1117 if (aup->mac_id != 0) {
1118 #endif
1119 /* If BCM5222, we can't leave MAC0 in reset because then
1120 * we can't access the dual phy for ETH1 */
1121 *aup->enable = MAC_EN_CLOCK_ENABLE;
1122 au_sync_delay(2);
1123 *aup->enable = 0;
1124 au_sync_delay(2);
1125 #ifdef CONFIG_BCM5222_DUAL_PHY
1126 }
1127 #endif
1128 aup->tx_full = 0;
1129 for (i = 0; i < NUM_RX_DMA; i++) {
1130 /* reset control bits */
1131 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
1132 }
1133 for (i = 0; i < NUM_TX_DMA; i++) {
1134 /* reset control bits */
1135 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
1136 }
1137 spin_unlock_irqrestore(&aup->lock, flags);
1138}
1139
1140
1141/*
1142 * Setup the receive and transmit "rings". These pointers are the addresses
1143 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
1144 * these are not descriptors sitting in memory.
1145 */
1146static void
1147setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
1148{
1149 int i;
1150
1151 for (i = 0; i < NUM_RX_DMA; i++) {
1152 aup->rx_dma_ring[i] =
1153 (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
1154 }
1155 for (i = 0; i < NUM_TX_DMA; i++) {
1156 aup->tx_dma_ring[i] =
1157 (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
1158 }
1159}
1160
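/* Per-port probe information, filled in by au1000_init_module()
 * according to the SoC variant.
 */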
1161static struct {
1162 int port;
1163 u32 base_addr;
1164 u32 macen_addr;
1165 int irq;
1166 struct net_device *dev;
1167} iflist[2];
1168
1169static int num_ifs;
1170
1171/*
 1172 * Set up the base address and interrupt of the Au1xxx Ethernet MACs
 1173 * based on CPU type and whether the interface is enabled in the sys_pinfunc
1174 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1175 */
1176static int __init au1000_init_module(void)
1177{
1178 struct cpuinfo_mips *c = &current_cpu_data;
1179 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1180 struct net_device *dev;
1181 int i, found_one = 0;
1182
1183 switch (c->cputype) {
1184#ifdef CONFIG_SOC_AU1000
1185 case CPU_AU1000:
1186 num_ifs = 2 - ni;
1187 iflist[0].base_addr = AU1000_ETH0_BASE;
1188 iflist[1].base_addr = AU1000_ETH1_BASE;
1189 iflist[0].macen_addr = AU1000_MAC0_ENABLE;
1190 iflist[1].macen_addr = AU1000_MAC1_ENABLE;
1191 iflist[0].irq = AU1000_MAC0_DMA_INT;
1192 iflist[1].irq = AU1000_MAC1_DMA_INT;
1193 break;
1194#endif
1195#ifdef CONFIG_SOC_AU1100
1196 case CPU_AU1100:
1197 num_ifs = 1 - ni;
1198 iflist[0].base_addr = AU1100_ETH0_BASE;
1199 iflist[0].macen_addr = AU1100_MAC0_ENABLE;
1200 iflist[0].irq = AU1100_MAC0_DMA_INT;
1201 break;
1202#endif
1203#ifdef CONFIG_SOC_AU1500
1204 case CPU_AU1500:
1205 num_ifs = 2 - ni;
1206 iflist[0].base_addr = AU1500_ETH0_BASE;
1207 iflist[1].base_addr = AU1500_ETH1_BASE;
1208 iflist[0].macen_addr = AU1500_MAC0_ENABLE;
1209 iflist[1].macen_addr = AU1500_MAC1_ENABLE;
1210 iflist[0].irq = AU1500_MAC0_DMA_INT;
1211 iflist[1].irq = AU1500_MAC1_DMA_INT;
1212 break;
1213#endif
1214#ifdef CONFIG_SOC_AU1550
1215 case CPU_AU1550:
1216 num_ifs = 2 - ni;
1217 iflist[0].base_addr = AU1550_ETH0_BASE;
1218 iflist[1].base_addr = AU1550_ETH1_BASE;
1219 iflist[0].macen_addr = AU1550_MAC0_ENABLE;
1220 iflist[1].macen_addr = AU1550_MAC1_ENABLE;
1221 iflist[0].irq = AU1550_MAC0_DMA_INT;
1222 iflist[1].irq = AU1550_MAC1_DMA_INT;
1223 break;
1224#endif
1225 default:
1226 num_ifs = 0;
1227 }
1228 for(i = 0; i < num_ifs; i++) {
1229 dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
1230 iflist[i].dev = dev;
1231 if (dev)
1232 found_one++;
1233 }
1234 if (!found_one)
1235 return -ENODEV;
1236 return 0;
1237}
1238
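/*
 * Translate the requested ethtool advertise mask into the PHY's
 * MII_ADVERTISE register, then start (or restart) autonegotiation
 * through BMCR.
 */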
1239static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
1240{
1241 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1242 u16 ctl, adv;
1243
1244 /* Setup standard advertise */
1245 adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
1246 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1247 if (advertise & ADVERTISED_10baseT_Half)
1248 adv |= ADVERTISE_10HALF;
1249 if (advertise & ADVERTISED_10baseT_Full)
1250 adv |= ADVERTISE_10FULL;
1251 if (advertise & ADVERTISED_100baseT_Half)
1252 adv |= ADVERTISE_100HALF;
1253 if (advertise & ADVERTISED_100baseT_Full)
1254 adv |= ADVERTISE_100FULL;
1255 mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
1256
1257 /* Start/Restart aneg */
1258 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1259 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1260 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1261
1262 return 0;
1263}
1264
1265static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
1266{
1267 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1268 u16 ctl;
1269
1270 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1271 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
1272
1273 /* First reset the PHY */
1274 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
1275
1276 /* Select speed & duplex */
1277 switch (speed) {
1278 case SPEED_10:
1279 break;
1280 case SPEED_100:
1281 ctl |= BMCR_SPEED100;
1282 break;
1283 case SPEED_1000:
1284 default:
1285 return -EINVAL;
1286 }
1287 if (fd == DUPLEX_FULL)
1288 ctl |= BMCR_FULLDPLX;
1289 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1290
1291 return 0;
1292}
1293
1294
1295static void
1296au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
1297{
1298 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1299 u32 advertise;
1300 int autoneg;
1301 int forced_speed;
1302 int forced_duplex;
1303
1304 /* Default advertise */
1305 advertise = GENMII_DEFAULT_ADVERTISE;
1306 autoneg = aup->want_autoneg;
1307 forced_speed = SPEED_100;
1308 forced_duplex = DUPLEX_FULL;
1309
1310 /* Setup link parameters */
1311 if (cmd) {
1312 if (cmd->autoneg == AUTONEG_ENABLE) {
1313 advertise = cmd->advertising;
1314 autoneg = 1;
1315 } else {
1316 autoneg = 0;
1317
1318 forced_speed = cmd->speed;
1319 forced_duplex = cmd->duplex;
1320 }
1321 }
1322
1323 /* Configure PHY & start aneg */
1324 aup->want_autoneg = autoneg;
1325 if (autoneg)
1326 au1000_setup_aneg(dev, advertise);
1327 else
1328 au1000_setup_forced(dev, forced_speed, forced_duplex);
1329 mod_timer(&aup->timer, jiffies + HZ);
1330}
1331
1332static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1333{
1334 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1335 u16 link, speed;
1336
1337 cmd->supported = GENMII_DEFAULT_FEATURES;
1338 cmd->advertising = GENMII_DEFAULT_ADVERTISE;
1339 cmd->port = PORT_MII;
1340 cmd->transceiver = XCVR_EXTERNAL;
1341 cmd->phy_address = aup->phy_addr;
1342 spin_lock_irq(&aup->lock);
1343 cmd->autoneg = aup->want_autoneg;
1344 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1345 if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
1346 cmd->speed = SPEED_100;
1347 else if (speed == IF_PORT_10BASET)
1348 cmd->speed = SPEED_10;
1349 if (link && (dev->if_port == IF_PORT_100BASEFX))
1350 cmd->duplex = DUPLEX_FULL;
1351 else
1352 cmd->duplex = DUPLEX_HALF;
1353 spin_unlock_irq(&aup->lock);
1354 return 0;
1355}
1356
1357static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1358{
1359 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1360 unsigned long features = GENMII_DEFAULT_FEATURES;
1361
1362 if (!capable(CAP_NET_ADMIN))
1363 return -EPERM;
1364
1365 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1366 return -EINVAL;
1367 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1368 return -EINVAL;
1369 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1370 return -EINVAL;
1371 if (cmd->autoneg == AUTONEG_DISABLE)
1372 switch (cmd->speed) {
1373 case SPEED_10:
1374 if (cmd->duplex == DUPLEX_HALF &&
1375 (features & SUPPORTED_10baseT_Half) == 0)
1376 return -EINVAL;
1377 if (cmd->duplex == DUPLEX_FULL &&
1378 (features & SUPPORTED_10baseT_Full) == 0)
1379 return -EINVAL;
1380 break;
1381 case SPEED_100:
1382 if (cmd->duplex == DUPLEX_HALF &&
1383 (features & SUPPORTED_100baseT_Half) == 0)
1384 return -EINVAL;
1385 if (cmd->duplex == DUPLEX_FULL &&
1386 (features & SUPPORTED_100baseT_Full) == 0)
1387 return -EINVAL;
1388 break;
1389 default:
1390 return -EINVAL;
1391 }
1392 else if ((features & SUPPORTED_Autoneg) == 0)
1393 return -EINVAL;
1394
1395 spin_lock_irq(&aup->lock);
1396 au1000_start_link(dev, cmd);
1397 spin_unlock_irq(&aup->lock);
1398 return 0;
1399}
1400
1401static int au1000_nway_reset(struct net_device *dev)
1402{
1403 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1404
1405 if (!aup->want_autoneg)
1406 return -EINVAL;
1407 spin_lock_irq(&aup->lock);
1408 au1000_start_link(dev, NULL);
1409 spin_unlock_irq(&aup->lock);
1410 return 0;
1411}
1412
1413static void
1414au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1415{
1416 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1417
1418 strcpy(info->driver, DRV_NAME);
1419 strcpy(info->version, DRV_VERSION);
1420 info->fw_version[0] = '\0';
1421 sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
1422 info->regdump_len = 0;
1423}
1424
1425static u32 au1000_get_link(struct net_device *dev)
1426{
1427 return netif_carrier_ok(dev);
1428}
1429
1430static struct ethtool_ops au1000_ethtool_ops = {
1431 .get_settings = au1000_get_settings,
1432 .set_settings = au1000_set_settings,
1433 .get_drvinfo = au1000_get_drvinfo,
1434 .nway_reset = au1000_nway_reset,
1435 .get_link = au1000_get_link
1436};
1437
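/*
 * Probe one on-chip MAC: claim its register window, allocate the
 * net_device and DMA-able packet buffers, pick up the MAC address
 * (YAMON environment, "ethaddr=" on the kernel command line, or the
 * hard-coded default), probe the PHY and fill in the net_device
 * methods. The MAC is left in reset on return.
 */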
1438static struct net_device *
1439au1000_probe(u32 ioaddr, int irq, int port_num)
1440{
1441 static unsigned version_printed = 0;
1442 struct au1000_private *aup = NULL;
1443 struct net_device *dev = NULL;
1444 db_dest_t *pDB, *pDBfree;
1445 char *pmac, *argptr;
1446 char ethaddr[6];
1447 int i, err;
1448
1449 if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
1450 return NULL;
1451
1452 if (version_printed++ == 0)
1453 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1454
1455 dev = alloc_etherdev(sizeof(struct au1000_private));
1456 if (!dev) {
1457 printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
1458 return NULL;
1459 }
1460
1461 if ((err = register_netdev(dev))) {
1462 printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
1463 err);
1464 free_netdev(dev);
1465 return NULL;
1466 }
1467
1468 printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
1469 dev->name, ioaddr, irq);
1470
1471 aup = dev->priv;
1472
1473 /* Allocate the data buffers */
1474 /* Snooping works fine with eth on all au1xxx */
1475 aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
1476 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1477 &aup->dma_addr,
1478 0);
1479 if (!aup->vaddr) {
1480 free_netdev(dev);
1481 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1482 return NULL;
1483 }
1484
1485 /* aup->mac is the base address of the MAC's registers */
1486 aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
1487 /* Setup some variables for quick register address access */
1488 if (ioaddr == iflist[0].base_addr)
1489 {
1490 /* check env variables first */
1491 if (!get_ethernet_addr(ethaddr)) {
1492 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1493 } else {
1494 /* Check command line */
1495 argptr = prom_getcmdline();
1496 if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
1497 printk(KERN_INFO "%s: No mac address found\n",
1498 dev->name);
1499 /* use the hard coded mac addresses */
1500 } else {
1501 str2eaddr(ethaddr, pmac + strlen("ethaddr="));
1502 memcpy(au1000_mac_addr, ethaddr,
1503 sizeof(au1000_mac_addr));
1504 }
1505 }
1506 aup->enable = (volatile u32 *)
1507 ((unsigned long)iflist[0].macen_addr);
1508 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1509 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1510 aup->mac_id = 0;
1511 au_macs[0] = aup;
1512 }
1513 else
1514 if (ioaddr == iflist[1].base_addr)
1515 {
1516 aup->enable = (volatile u32 *)
1517 ((unsigned long)iflist[1].macen_addr);
1518 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1519 dev->dev_addr[4] += 0x10;
1520 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1521 aup->mac_id = 1;
1522 au_macs[1] = aup;
1523 }
1524 else
1525 {
1526 printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
1527 }
1528
1529 /* bring the device out of reset, otherwise probing the mii
1530 * will hang */
1531 *aup->enable = MAC_EN_CLOCK_ENABLE;
1532 au_sync_delay(2);
1533 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1534 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1535 au_sync_delay(2);
1536
1537 aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
1538 if (!aup->mii) {
1539 printk(KERN_ERR "%s: out of memory\n", dev->name);
1540 goto err_out;
1541 }
1542 aup->mii->next = NULL;
1543 aup->mii->chip_info = NULL;
1544 aup->mii->status = 0;
1545 aup->mii->mii_control_reg = 0;
1546 aup->mii->mii_data_reg = 0;
1547
1548 if (mii_probe(dev) != 0) {
1549 goto err_out;
1550 }
1551
1552 pDBfree = NULL;
1553 /* setup the data buffer descriptors and attach a buffer to each one */
1554 pDB = aup->db;
1555 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1556 pDB->pnext = pDBfree;
1557 pDBfree = pDB;
1558 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1559 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1560 pDB++;
1561 }
1562 aup->pDBfree = pDBfree;
1563
1564 for (i = 0; i < NUM_RX_DMA; i++) {
1565 pDB = GetFreeDB(aup);
1566 if (!pDB) {
1567 goto err_out;
1568 }
1569 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1570 aup->rx_db_inuse[i] = pDB;
1571 }
1572 for (i = 0; i < NUM_TX_DMA; i++) {
1573 pDB = GetFreeDB(aup);
1574 if (!pDB) {
1575 goto err_out;
1576 }
1577 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1578 aup->tx_dma_ring[i]->len = 0;
1579 aup->tx_db_inuse[i] = pDB;
1580 }
1581
1582 spin_lock_init(&aup->lock);
1583 dev->base_addr = ioaddr;
1584 dev->irq = irq;
1585 dev->open = au1000_open;
1586 dev->hard_start_xmit = au1000_tx;
1587 dev->stop = au1000_close;
1588 dev->get_stats = au1000_get_stats;
1589 dev->set_multicast_list = &set_rx_mode;
1590 dev->do_ioctl = &au1000_ioctl;
1591 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1592 dev->set_config = &au1000_set_config;
1593 dev->tx_timeout = au1000_tx_timeout;
1594 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1595
1596 /*
1597 * The boot code uses the ethernet controller, so reset it to start
1598 * fresh. au1000_init() expects that the device is in reset state.
1599 */
1600 reset_mac(dev);
1601
1602 return dev;
1603
1604err_out:
1605 /* here we should have a valid dev plus aup-> register addresses
1606 * so we can reset the mac properly.*/
1607 reset_mac(dev);
 1608 kfree(aup->mii);
1609 for (i = 0; i < NUM_RX_DMA; i++) {
1610 if (aup->rx_db_inuse[i])
1611 ReleaseDB(aup, aup->rx_db_inuse[i]);
1612 }
1613 for (i = 0; i < NUM_TX_DMA; i++) {
1614 if (aup->tx_db_inuse[i])
1615 ReleaseDB(aup, aup->tx_db_inuse[i]);
1616 }
1617 dma_free_noncoherent(NULL,
1618 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1619 (void *)aup->vaddr,
1620 aup->dma_addr);
1621 unregister_netdev(dev);
1622 free_netdev(dev);
1623 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1624 return NULL;
1625}
1626
1627/*
1628 * Initialize the interface.
1629 *
1630 * When the device powers up, the clocks are disabled and the
1631 * mac is in reset state. When the interface is closed, we
1632 * do the same -- reset the device and disable the clocks to
1633 * conserve power. Thus, whenever au1000_init() is called,
1634 * the device should already be in reset state.
1635 */
1636static int au1000_init(struct net_device *dev)
1637{
1638 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1639 u32 flags;
1640 int i;
1641 u32 control;
1642 u16 link, speed;
1643
1644 if (au1000_debug > 4)
1645 printk("%s: au1000_init\n", dev->name);
1646
1647 spin_lock_irqsave(&aup->lock, flags);
1648
1649 /* bring the device out of reset */
1650 *aup->enable = MAC_EN_CLOCK_ENABLE;
1651 au_sync_delay(2);
1652 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1653 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1654 au_sync_delay(20);
1655
1656 aup->mac->control = 0;
1657 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
1658 aup->tx_tail = aup->tx_head;
1659 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
1660
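 /* program the station address: bytes 4-5 go into mac_addr_high,
 * bytes 0-3 into mac_addr_low */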
1661 aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
1662 aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
1663 dev->dev_addr[1]<<8 | dev->dev_addr[0];
1664
1665 for (i = 0; i < NUM_RX_DMA; i++) {
1666 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
1667 }
1668 au_sync();
1669
1670 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1671 control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
1672#ifndef CONFIG_CPU_LITTLE_ENDIAN
1673 control |= MAC_BIG_ENDIAN;
1674#endif
1675 if (link && (dev->if_port == IF_PORT_100BASEFX)) {
1676 control |= MAC_FULL_DUPLEX;
1677 }
1678
1679 aup->mac->control = control;
1680 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1681 au_sync();
1682
1683 spin_unlock_irqrestore(&aup->lock, flags);
1684 return 0;
1685}
1686
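/*
 * Link poll timer, re-armed once a second. It mirrors the PHY link
 * state into the netif carrier flag and, when the negotiated mode
 * changes, switches the MAC between half and full duplex before
 * re-enabling rx/tx.
 */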
1687static void au1000_timer(unsigned long data)
1688{
1689 struct net_device *dev = (struct net_device *)data;
1690 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1691 unsigned char if_port;
1692 u16 link, speed;
1693
1694 if (!dev) {
1695 /* fatal error, don't restart the timer */
1696 printk(KERN_ERR "au1000_timer error: NULL dev\n");
1697 return;
1698 }
1699
1700 if_port = dev->if_port;
1701 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1702 if (link) {
 1703 if (!netif_carrier_ok(dev)) {
 1704 netif_carrier_on(dev);
1705 printk(KERN_INFO "%s: link up\n", dev->name);
1706 }
1707 }
1708 else {
 1709 if (netif_carrier_ok(dev)) {
 1710 netif_carrier_off(dev);
1711 dev->if_port = 0;
1712 printk(KERN_INFO "%s: link down\n", dev->name);
1713 }
1714 }
1715 }
1716
1717 if (link && (dev->if_port != if_port) &&
1718 (dev->if_port != IF_PORT_UNKNOWN)) {
1719 hard_stop(dev);
1720 if (dev->if_port == IF_PORT_100BASEFX) {
1721 printk(KERN_INFO "%s: going to full duplex\n",
1722 dev->name);
1723 aup->mac->control |= MAC_FULL_DUPLEX;
1724 au_sync_delay(1);
1725 }
1726 else {
1727 aup->mac->control &= ~MAC_FULL_DUPLEX;
1728 au_sync_delay(1);
1729 }
1730 enable_rx_tx(dev);
1731 }
1732
1733 aup->timer.expires = RUN_AT((1*HZ));
1734 aup->timer.data = (unsigned long)dev;
1735 aup->timer.function = &au1000_timer; /* timer handler */
1736 add_timer(&aup->timer);
1737
1738}
1739
1740static int au1000_open(struct net_device *dev)
1741{
1742 int retval;
1743 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1744
1745 if (au1000_debug > 4)
1746 printk("%s: open: dev=%p\n", dev->name, dev);
1747
1748 if ((retval = au1000_init(dev))) {
1749 printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
1750 free_irq(dev->irq, dev);
1751 return retval;
1752 }
1753 netif_start_queue(dev);
1754
1755 if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
1756 dev->name, dev))) {
1757 printk(KERN_ERR "%s: unable to get IRQ %d\n",
1758 dev->name, dev->irq);
1759 return retval;
1760 }
1761
1762 init_timer(&aup->timer); /* used in ioctl() */
1763 aup->timer.expires = RUN_AT((3*HZ));
1764 aup->timer.data = (unsigned long)dev;
1765 aup->timer.function = &au1000_timer; /* timer handler */
1766 add_timer(&aup->timer);
1767
1768 if (au1000_debug > 4)
1769 printk("%s: open: Initialization done.\n", dev->name);
1770
1771 return 0;
1772}
1773
1774static int au1000_close(struct net_device *dev)
1775{
1776 u32 flags;
1777 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1778
1779 if (au1000_debug > 4)
1780 printk("%s: close: dev=%p\n", dev->name, dev);
1781
1782 reset_mac(dev);
1783
1784 spin_lock_irqsave(&aup->lock, flags);
1785
1786 /* stop the device */
1787 netif_stop_queue(dev);
1788
1789 /* disable the interrupt */
1790 free_irq(dev->irq, dev);
1791 spin_unlock_irqrestore(&aup->lock, flags);
1792
1793 return 0;
1794}
1795
1796static void __exit au1000_cleanup_module(void)
1797{
1798 int i, j;
1799 struct net_device *dev;
1800 struct au1000_private *aup;
1801
1802 for (i = 0; i < num_ifs; i++) {
1803 dev = iflist[i].dev;
1804 if (dev) {
1805 aup = (struct au1000_private *) dev->priv;
1806 unregister_netdev(dev);
 1807 kfree(aup->mii);
1808 for (j = 0; j < NUM_RX_DMA; j++) {
1809 if (aup->rx_db_inuse[j])
1810 ReleaseDB(aup, aup->rx_db_inuse[j]);
1811 }
1812 for (j = 0; j < NUM_TX_DMA; j++) {
1813 if (aup->tx_db_inuse[j])
1814 ReleaseDB(aup, aup->tx_db_inuse[j]);
1815 }
1816 dma_free_noncoherent(NULL,
1817 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1818 (void *)aup->vaddr,
1819 aup->dma_addr);
1820 free_netdev(dev);
1821 release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
1822 }
1823 }
1824}
1825
 1826static void update_tx_stats(struct net_device *dev, u32 status)
1827{
1828 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1829 struct net_device_stats *ps = &aup->stats;
1830
1831 if (status & TX_FRAME_ABORTED) {
1832 if (dev->if_port == IF_PORT_100BASEFX) {
1833 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1834 /* any other tx errors are only valid
1835 * in half duplex mode */
1836 ps->tx_errors++;
1837 ps->tx_aborted_errors++;
1838 }
1839 }
1840 else {
1841 ps->tx_errors++;
1842 ps->tx_aborted_errors++;
1843 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1844 ps->tx_carrier_errors++;
1845 }
1846 }
1847}
1848
1849
1850/*
1851 * Called from the interrupt service routine to acknowledge
1852 * the TX DONE bits. This is a must if the irq is setup as
1853 * edge triggered.
1854 */
1855static void au1000_tx_ack(struct net_device *dev)
1856{
1857 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1858 volatile tx_dma_t *ptxd;
1859
1860 ptxd = aup->tx_dma_ring[aup->tx_tail];
1861
1862 while (ptxd->buff_stat & TX_T_DONE) {
 1863 update_tx_stats(dev, ptxd->status);
1864 ptxd->buff_stat &= ~TX_T_DONE;
1865 ptxd->len = 0;
1866 au_sync();
1867
1868 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1869 ptxd = aup->tx_dma_ring[aup->tx_tail];
1870
1871 if (aup->tx_full) {
1872 aup->tx_full = 0;
1873 netif_wake_queue(dev);
1874 }
1875 }
1876}
1877
1878
1879/*
1880 * Au1000 transmit routine.
1881 */
1882static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1883{
1884 struct au1000_private *aup = (struct au1000_private *) dev->priv;
 1885 struct net_device_stats *ps = &aup->stats;
1886 volatile tx_dma_t *ptxd;
1887 u32 buff_stat;
1888 db_dest_t *pDB;
1889 int i;
1890
1891 if (au1000_debug > 5)
1892 printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
1893 dev->name, (unsigned)aup, skb->len,
1894 skb->data, aup->tx_head);
1895
1896 ptxd = aup->tx_dma_ring[aup->tx_head];
1897 buff_stat = ptxd->buff_stat;
1898 if (buff_stat & TX_DMA_ENABLE) {
1899 /* We've wrapped around and the transmitter is still busy */
1900 netif_stop_queue(dev);
1901 aup->tx_full = 1;
1902 return 1;
1903 }
1904 else if (buff_stat & TX_T_DONE) {
 1905 update_tx_stats(dev, ptxd->status);
1906 ptxd->len = 0;
1907 }
1908
1909 if (aup->tx_full) {
1910 aup->tx_full = 0;
1911 netif_wake_queue(dev);
1912 }
1913
1914 pDB = aup->tx_db_inuse[aup->tx_head];
1915 memcpy((void *)pDB->vaddr, skb->data, skb->len);
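 /* pad short frames out to the 60-byte Ethernet minimum */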
1916 if (skb->len < ETH_ZLEN) {
1917 for (i=skb->len; i<ETH_ZLEN; i++) {
1918 ((char *)pDB->vaddr)[i] = 0;
1919 }
1920 ptxd->len = ETH_ZLEN;
1921 }
1922 else
1923 ptxd->len = skb->len;
1924
1925 ps->tx_packets++;
1926 ps->tx_bytes += ptxd->len;
1927
1928 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1929 au_sync();
1930 dev_kfree_skb(skb);
1931 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1932 dev->trans_start = jiffies;
1933 return 0;
1934}
1935
1936static inline void update_rx_stats(struct net_device *dev, u32 status)
1937{
1938 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1939 struct net_device_stats *ps = &aup->stats;
1940
1941 ps->rx_packets++;
1942 if (status & RX_MCAST_FRAME)
1943 ps->multicast++;
1944
1945 if (status & RX_ERROR) {
1946 ps->rx_errors++;
1947 if (status & RX_MISSED_FRAME)
1948 ps->rx_missed_errors++;
 1949 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
1950 ps->rx_length_errors++;
1951 if (status & RX_CRC_ERROR)
1952 ps->rx_crc_errors++;
1953 if (status & RX_COLL)
1954 ps->collisions++;
1955 }
1956 else
1957 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1958
1959}
1960
1961/*
1962 * Au1000 receive routine.
1963 */
1964static int au1000_rx(struct net_device *dev)
1965{
1966 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1967 struct sk_buff *skb;
1968 volatile rx_dma_t *prxd;
1969 u32 buff_stat, status;
1970 db_dest_t *pDB;
1971 u32 frmlen;
1972
1973 if (au1000_debug > 5)
1974 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1975
1976 prxd = aup->rx_dma_ring[aup->rx_head];
1977 buff_stat = prxd->buff_stat;
1978 while (buff_stat & RX_T_DONE) {
1979 status = prxd->status;
1980 pDB = aup->rx_db_inuse[aup->rx_head];
1981 update_rx_stats(dev, status);
1982 if (!(status & RX_ERROR)) {
1983
1984 /* good frame */
1985 frmlen = (status & RX_FRAME_LEN_MASK);
1986 frmlen -= 4; /* Remove FCS */
1987 skb = dev_alloc_skb(frmlen + 2);
1988 if (skb == NULL) {
1989 printk(KERN_ERR
1990 "%s: Memory squeeze, dropping packet.\n",
1991 dev->name);
1992 aup->stats.rx_dropped++;
1993 continue;
1994 }
1995 skb->dev = dev;
1996 skb_reserve(skb, 2); /* 16 byte IP header align */
1997 eth_copy_and_sum(skb,
1998 (unsigned char *)pDB->vaddr, frmlen, 0);
1999 skb_put(skb, frmlen);
2000 skb->protocol = eth_type_trans(skb, dev);
2001 netif_rx(skb); /* pass the packet to upper layers */
2002 }
2003 else {
2004 if (au1000_debug > 4) {
2005 if (status & RX_MISSED_FRAME)
2006 printk("rx miss\n");
2007 if (status & RX_WDOG_TIMER)
2008 printk("rx wdog\n");
2009 if (status & RX_RUNT)
2010 printk("rx runt\n");
2011 if (status & RX_OVERLEN)
2012 printk("rx overlen\n");
2013 if (status & RX_COLL)
2014 printk("rx coll\n");
2015 if (status & RX_MII_ERROR)
2016 printk("rx mii error\n");
2017 if (status & RX_CRC_ERROR)
2018 printk("rx crc error\n");
2019 if (status & RX_LEN_ERROR)
2020 printk("rx len error\n");
2021 if (status & RX_U_CNTRL_FRAME)
2022 printk("rx u control frame\n");
2025 }
2026 }
2027 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
2028 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
2029 au_sync();
2030
2031 /* next descriptor */
2032 prxd = aup->rx_dma_ring[aup->rx_head];
2033 buff_stat = prxd->buff_stat;
2034 dev->last_rx = jiffies;
2035 }
2036 return 0;
2037}
2038
2039
2040/*
2041 * Au1000 interrupt service routine.
2042 */
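/*
 * Receive work is done before transmit completions are acknowledged,
 * as noted below, to reduce the chance of an RX overrun while the ISR
 * is busy; au1000_tx_ack() then handles transmit completions.
 */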
2043static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2044{
2045 struct net_device *dev = (struct net_device *) dev_id;
2046
2047 if (dev == NULL) {
 2048		printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
2049 return IRQ_RETVAL(1);
2050 }
2051
2052 /* Handle RX interrupts first to minimize chance of overrun */
2053
2054 au1000_rx(dev);
2055 au1000_tx_ack(dev);
2056 return IRQ_RETVAL(1);
2057}
2058
2059
2060/*
2061 * The Tx ring has been full longer than the watchdog timeout
 2062 * value, so the transmitter is presumed hung; reset and restart it.
2063 */
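/* Recovery: reset the MAC, reinitialize it via au1000_init() and restart the queue. */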
2064static void au1000_tx_timeout(struct net_device *dev)
2065{
2066 printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
2067 reset_mac(dev);
2068 au1000_init(dev);
2069 dev->trans_start = jiffies;
2070 netif_wake_queue(dev);
2071}
2072
2073
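/*
 * Software CRC-32 over the Ethernet polynomial. set_rx_mode() below
 * uses the top six bits of the result (>> 26) to select one of the 64
 * bins of the MAC's multicast hash filter.
 */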
2074static unsigned const ethernet_polynomial = 0x04c11db7U;
2075static inline u32 ether_crc(int length, unsigned char *data)
2076{
2077 int crc = -1;
2078
2079 while(--length >= 0) {
2080 unsigned char current_octet = *data++;
2081 int bit;
2082 for (bit = 0; bit < 8; bit++, current_octet >>= 1)
2083 crc = (crc << 1) ^
2084 ((crc < 0) ^ (current_octet & 1) ?
2085 ethernet_polynomial : 0);
2086 }
2087 return crc;
2088}
2089
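/*
 * Program the MAC receive filter: promiscuous when IFF_PROMISC is set,
 * pass-all-multicast when IFF_ALLMULTI is set or the multicast list is
 * longer than MULTICAST_FILTER_LIMIT, otherwise a 64-bit hash filter
 * built from the CRC of each multicast address.
 */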
2090static void set_rx_mode(struct net_device *dev)
2091{
2092 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2093
2094 if (au1000_debug > 4)
2095 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
2096
2097 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2098 aup->mac->control |= MAC_PROMISCUOUS;
2099 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2100 } else if ((dev->flags & IFF_ALLMULTI) ||
2101 dev->mc_count > MULTICAST_FILTER_LIMIT) {
2102 aup->mac->control |= MAC_PASS_ALL_MULTI;
2103 aup->mac->control &= ~MAC_PROMISCUOUS;
2104 printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
2105 } else {
2106 int i;
2107 struct dev_mc_list *mclist;
2108 u32 mc_filter[2]; /* Multicast hash filter */
2109
2110 mc_filter[1] = mc_filter[0] = 0;
2111 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2112 i++, mclist = mclist->next) {
2113 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
2114 (long *)mc_filter);
2115 }
2116 aup->mac->multi_hash_high = mc_filter[1];
2117 aup->mac->multi_hash_low = mc_filter[0];
2118 aup->mac->control &= ~MAC_PROMISCUOUS;
2119 aup->mac->control |= MAC_HASH_MODE;
2120 }
2121}
2122
2123
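/*
 * Legacy MII ioctl interface: ifr_ifru is treated as an array of u16,
 * where data[0] is the PHY address, data[1] the register number,
 * data[2] the value to write (SIOCSMIIREG) and data[3] the value read
 * back (SIOCGMIIREG).
 */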
2124static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2125{
2126 struct au1000_private *aup = (struct au1000_private *)dev->priv;
2127 u16 *data = (u16 *)&rq->ifr_ifru;
2128
2129 switch(cmd) {
2130 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2131 case SIOCGMIIPHY:
2132 if (!netif_running(dev)) return -EINVAL;
2133 data[0] = aup->phy_addr;
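		/* fall through: also read the register from that PHY */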
2134 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
2135 case SIOCGMIIREG:
2136 data[3] = mdio_read(dev, data[0], data[1]);
2137 return 0;
2138 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
2139 case SIOCSMIIREG:
2140 if (!capable(CAP_NET_ADMIN))
2141 return -EPERM;
2142 mdio_write(dev, data[0], data[1],data[2]);
2143 return 0;
2144 default:
2145 return -EOPNOTSUPP;
2146 }
2147
2148}
2149
2150
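/*
 * SIOCSIFMAP handler: map the requested ifmap->port onto the PHY's MII
 * control register, either re-enabling autonegotiation (IF_PORT_UNKNOWN)
 * or forcing speed/duplex; the carrier is dropped so the link timer can
 * bring the interface back up in the new mode.
 */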
2151static int au1000_set_config(struct net_device *dev, struct ifmap *map)
2152{
2153 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2154 u16 control;
2155
2156 if (au1000_debug > 4) {
2157 printk("%s: set_config called: dev->if_port %d map->port %x\n",
2158 dev->name, dev->if_port, map->port);
2159 }
2160
2161 switch(map->port){
2162 case IF_PORT_UNKNOWN: /* use auto here */
2163 printk(KERN_INFO "%s: config phy for aneg\n",
2164 dev->name);
2165 dev->if_port = map->port;
2166 /* Link Down: the timer will bring it up */
2167 netif_carrier_off(dev);
2168
2169 /* read current control */
2170 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2171 control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
2172
2173 /* enable auto negotiation and reset the negotiation */
2174 mdio_write(dev, aup->phy_addr, MII_CONTROL,
2175 control | MII_CNTL_AUTO |
2176 MII_CNTL_RST_AUTO);
2177
2178 break;
2179
2180 case IF_PORT_10BASET: /* 10BaseT */
2181 printk(KERN_INFO "%s: config phy for 10BaseT\n",
2182 dev->name);
2183 dev->if_port = map->port;
2184
2185 /* Link Down: the timer will bring it up */
2186 netif_carrier_off(dev);
2187
2188 /* set Speed to 10Mbps, Half Duplex */
2189 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2190 control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
2191 MII_CNTL_FDX);
2192
2193 /* disable auto negotiation and force 10M/HD mode*/
2194 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2195 break;
2196
2197 case IF_PORT_100BASET: /* 100BaseT */
2198 case IF_PORT_100BASETX: /* 100BaseTx */
2199 printk(KERN_INFO "%s: config phy for 100BaseTX\n",
2200 dev->name);
2201 dev->if_port = map->port;
2202
2203 /* Link Down: the timer will bring it up */
2204 netif_carrier_off(dev);
2205
2206 /* set Speed to 100Mbps, Half Duplex */
2207 /* disable auto negotiation and enable 100MBit Mode */
2208 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2209 control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
2210 control |= MII_CNTL_F100;
2211 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2212 break;
2213
2214 case IF_PORT_100BASEFX: /* 100BaseFx */
2215 printk(KERN_INFO "%s: config phy for 100BaseFX\n",
2216 dev->name);
2217 dev->if_port = map->port;
2218
2219 /* Link Down: the timer will bring it up */
2220 netif_carrier_off(dev);
2221
2222 /* set Speed to 100Mbps, Full Duplex */
2223 /* disable auto negotiation and enable 100MBit Mode */
2224 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2225 control &= ~MII_CNTL_AUTO;
2226 control |= MII_CNTL_F100 | MII_CNTL_FDX;
2227 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2228 break;
2229 case IF_PORT_10BASE2: /* 10Base2 */
2230 case IF_PORT_AUI: /* AUI */
 2231		/* These modes are not supported */
 2232		printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
 2233			dev->name);
2234 return -EOPNOTSUPP;
2235 break;
2236
2237 default:
 2238		printk(KERN_ERR "%s: Invalid media selected\n",
 2239			dev->name);
2240 return -EINVAL;
2241 }
2242 return 0;
2243}
2244
2245static struct net_device_stats *au1000_get_stats(struct net_device *dev)
2246{
2247 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2248
2249 if (au1000_debug > 4)
2250 printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
2251
2252 if (netif_device_present(dev)) {
2253 return &aup->stats;
2254 }
 2255	return NULL;
2256}
2257
2258module_init(au1000_init_module);
2259module_exit(au1000_cleanup_module);