[PATCH] 3c5zz ethernet: fix section warnings
[deliverable/linux.git] / drivers / net / au1000_eth.c
1/*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001-2003, 2006 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Author: MontaVista Software, Inc.
13 * ppopov@mvista.com or source@mvista.com
14 *
15 * ########################################################################
16 *
17 * This program is free software; you can distribute it and/or modify it
18 * under the terms of the GNU General Public License (Version 2) as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 * for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
29 *
30 * ########################################################################
31 *
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39#include <linux/string.h>
40#include <linux/timer.h>
41#include <linux/errno.h>
42#include <linux/in.h>
43#include <linux/ioport.h>
44#include <linux/bitops.h>
45#include <linux/slab.h>
46#include <linux/interrupt.h>
47#include <linux/pci.h>
48#include <linux/init.h>
49#include <linux/netdevice.h>
50#include <linux/etherdevice.h>
51#include <linux/ethtool.h>
52#include <linux/mii.h>
53#include <linux/skbuff.h>
54#include <linux/delay.h>
55#include <linux/crc32.h>
56#include <asm/mipsregs.h>
57#include <asm/irq.h>
58#include <asm/io.h>
59#include <asm/processor.h>
60
61#include <asm/mach-au1x00/au1000.h>
62#include <asm/cpu.h>
63#include "au1000_eth.h"
64
65#ifdef AU1000_ETH_DEBUG
66static int au1000_debug = 5;
67#else
68static int au1000_debug = 3;
69#endif
70
71#define DRV_NAME "au1000_eth"
72#define DRV_VERSION "1.5"
73#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
74#define DRV_DESC "Au1xxx on-chip Ethernet driver"
75
76MODULE_AUTHOR(DRV_AUTHOR);
77MODULE_DESCRIPTION(DRV_DESC);
78MODULE_LICENSE("GPL");
79
80// prototypes
81static void hard_stop(struct net_device *);
82static void enable_rx_tx(struct net_device *dev);
83static struct net_device * au1000_probe(int port_num);
84static int au1000_init(struct net_device *);
85static int au1000_open(struct net_device *);
86static int au1000_close(struct net_device *);
87static int au1000_tx(struct sk_buff *, struct net_device *);
88static int au1000_rx(struct net_device *);
89static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
90static void au1000_tx_timeout(struct net_device *);
91static int au1000_set_config(struct net_device *dev, struct ifmap *map);
92static void set_rx_mode(struct net_device *);
93static struct net_device_stats *au1000_get_stats(struct net_device *);
94static void au1000_timer(unsigned long);
95static int au1000_ioctl(struct net_device *, struct ifreq *, int);
96static int mdio_read(struct net_device *, int, int);
97static void mdio_write(struct net_device *, int, int, u16);
98static void dump_mii(struct net_device *dev, int phy_id);
99
100// externs
101extern void ack_rise_edge_irq(unsigned int);
102extern int get_ethernet_addr(char *ethernet_addr);
103extern void str2eaddr(unsigned char *ea, unsigned char *str);
104extern char * __init prom_getcmdline(void);
105
106/*
107 * Theory of operation
108 *
109 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
110 * There are four receive and four transmit descriptors. These
111 * descriptors are not in memory; rather, they are just a set of
112 * hardware registers.
113 *
114 * Since the Au1000 has a coherent data cache, the receive and
115 * transmit buffers are allocated from the KSEG0 segment. The
116 * hardware registers, however, are still mapped at KSEG1 to
117 * make sure there are no out-of-order writes, and that all writes
118 * complete immediately.
119 */
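/*
 * With only four descriptors per direction, the software head/tail
 * indices used below simply wrap modulo NUM_RX_DMA/NUM_TX_DMA,
 * e.g. tx_head = (tx_head + 1) & (NUM_TX_DMA - 1).
 */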
120
121/* These addresses are only used if yamon doesn't tell us what
122 * the mac address is, and the mac address is not passed on the
123 * command line.
124 */
125static unsigned char au1000_mac_addr[6] __devinitdata = {
126 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
127};
128
129#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
130#define RUN_AT(x) (jiffies + (x))
131
132// For reading/writing 32-bit words from/to DMA memory
133#define cpu_to_dma32 cpu_to_be32
134#define dma32_to_cpu be32_to_cpu
135
136struct au1000_private *au_macs[NUM_ETH_INTERFACES];
137
138/* FIXME
139 * All of the PHY code really should be detached from the MAC
140 * code.
141 */
142
143/* Default advertise */
144#define GENMII_DEFAULT_ADVERTISE \
145 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
146 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
147 ADVERTISED_Autoneg
148
149#define GENMII_DEFAULT_FEATURES \
150 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
151 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
152 SUPPORTED_Autoneg
153
154int bcm_5201_init(struct net_device *dev, int phy_addr)
155{
156 s16 data;
157
158 /* Stop auto-negotiation */
159 data = mdio_read(dev, phy_addr, MII_CONTROL);
160 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
161
162 /* Set advertisement to 10/100 and Half/Full duplex
163 * (full capabilities) */
164 data = mdio_read(dev, phy_addr, MII_ANADV);
165 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
166 mdio_write(dev, phy_addr, MII_ANADV, data);
167
168 /* Restart auto-negotiation */
169 data = mdio_read(dev, phy_addr, MII_CONTROL);
170 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
171 mdio_write(dev, phy_addr, MII_CONTROL, data);
172
173 if (au1000_debug > 4)
174 dump_mii(dev, phy_addr);
175 return 0;
176}
177
178int bcm_5201_reset(struct net_device *dev, int phy_addr)
179{
180 s16 mii_control, timeout;
181
182 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
183 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
184 mdelay(1);
185 for (timeout = 100; timeout > 0; --timeout) {
186 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
187 if ((mii_control & MII_CNTL_RESET) == 0)
188 break;
189 mdelay(1);
190 }
191 if (mii_control & MII_CNTL_RESET) {
192 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
193 return -1;
194 }
195 return 0;
196}
197
198int
199bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
200{
201 u16 mii_data;
202 struct au1000_private *aup;
203
204 if (!dev) {
205 printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
206 return -1;
207 }
208 aup = (struct au1000_private *) dev->priv;
209
210 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
211 if (mii_data & MII_STAT_LINK) {
212 *link = 1;
213 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
214 if (mii_data & MII_AUX_100) {
215 if (mii_data & MII_AUX_FDX) {
216 *speed = IF_PORT_100BASEFX;
217 dev->if_port = IF_PORT_100BASEFX;
218 }
219 else {
220 *speed = IF_PORT_100BASETX;
221 dev->if_port = IF_PORT_100BASETX;
222 }
223 }
224 else {
225 *speed = IF_PORT_10BASET;
226 dev->if_port = IF_PORT_10BASET;
227 }
228
229 }
230 else {
231 *link = 0;
232 *speed = 0;
233 dev->if_port = IF_PORT_UNKNOWN;
234 }
235 return 0;
236}
237
238int lsi_80227_init(struct net_device *dev, int phy_addr)
239{
240 if (au1000_debug > 4)
241 printk("lsi_80227_init\n");
242
243 /* restart auto-negotiation */
244 mdio_write(dev, phy_addr, MII_CONTROL,
245 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
246 mdelay(1);
247
248 /* set up LEDs to correct display */
249#ifdef CONFIG_MIPS_MTX1
250 mdio_write(dev, phy_addr, 17, 0xff80);
251#else
252 mdio_write(dev, phy_addr, 17, 0xffc0);
253#endif
254
255 if (au1000_debug > 4)
256 dump_mii(dev, phy_addr);
257 return 0;
258}
259
260int lsi_80227_reset(struct net_device *dev, int phy_addr)
261{
262 s16 mii_control, timeout;
263
264 if (au1000_debug > 4) {
265 printk("lsi_80227_reset\n");
266 dump_mii(dev, phy_addr);
267 }
268
269 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
270 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
271 mdelay(1);
272 for (timeout = 100; timeout > 0; --timeout) {
273 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
274 if ((mii_control & MII_CNTL_RESET) == 0)
275 break;
276 mdelay(1);
277 }
278 if (mii_control & MII_CNTL_RESET) {
279 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
280 return -1;
281 }
282 return 0;
283}
284
285int
286lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
287{
288 u16 mii_data;
289 struct au1000_private *aup;
290
291 if (!dev) {
292 printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
293 return -1;
294 }
295 aup = (struct au1000_private *) dev->priv;
296
297 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
298 if (mii_data & MII_STAT_LINK) {
299 *link = 1;
300 mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
301 if (mii_data & MII_LSI_PHY_STAT_SPD) {
302 if (mii_data & MII_LSI_PHY_STAT_FDX) {
303 *speed = IF_PORT_100BASEFX;
304 dev->if_port = IF_PORT_100BASEFX;
305 }
306 else {
307 *speed = IF_PORT_100BASETX;
308 dev->if_port = IF_PORT_100BASETX;
309 }
310 }
311 else {
312 *speed = IF_PORT_10BASET;
313 dev->if_port = IF_PORT_10BASET;
314 }
315
316 }
317 else {
318 *link = 0;
319 *speed = 0;
320 dev->if_port = IF_PORT_UNKNOWN;
321 }
322 return 0;
323}
324
325int am79c901_init(struct net_device *dev, int phy_addr)
326{
327 printk("am79c901_init\n");
328 return 0;
329}
330
331int am79c901_reset(struct net_device *dev, int phy_addr)
332{
333 printk("am79c901_reset\n");
334 return 0;
335}
336
337int
338am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
339{
340 return 0;
341}
342
343int am79c874_init(struct net_device *dev, int phy_addr)
344{
345 s16 data;
346
347 /* The 79c874 has bit assignments quite similar to the BCM5201 */
348 if (au1000_debug > 4)
349 printk("am79c847_init\n");
350
351 /* Stop auto-negotiation */
352 data = mdio_read(dev, phy_addr, MII_CONTROL);
353 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
354
355 /* Set advertisement to 10/100 and Half/Full duplex
356 * (full capabilities) */
357 data = mdio_read(dev, phy_addr, MII_ANADV);
358 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
359 mdio_write(dev, phy_addr, MII_ANADV, data);
360
361 /* Restart auto-negotiation */
362 data = mdio_read(dev, phy_addr, MII_CONTROL);
363 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
364
365 mdio_write(dev, phy_addr, MII_CONTROL, data);
366
367 if (au1000_debug > 4) dump_mii(dev, phy_addr);
368 return 0;
369}
370
371int am79c874_reset(struct net_device *dev, int phy_addr)
372{
373 s16 mii_control, timeout;
374
375 if (au1000_debug > 4)
376 printk("am79c874_reset\n");
377
378 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
379 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
380 mdelay(1);
381 for (timeout = 100; timeout > 0; --timeout) {
382 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
383 if ((mii_control & MII_CNTL_RESET) == 0)
384 break;
385 mdelay(1);
386 }
387 if (mii_control & MII_CNTL_RESET) {
388 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
389 return -1;
390 }
391 return 0;
392}
393
394int
395am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
396{
397 u16 mii_data;
398 struct au1000_private *aup;
399
400 // printk("am79c874_status\n");
401 if (!dev) {
402 printk(KERN_ERR "am79c874_status error: NULL dev\n");
403 return -1;
404 }
405
406 aup = (struct au1000_private *) dev->priv;
407 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
408
409 if (mii_data & MII_STAT_LINK) {
410 *link = 1;
411 mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
412 if (mii_data & MII_AMD_PHY_STAT_SPD) {
413 if (mii_data & MII_AMD_PHY_STAT_FDX) {
414 *speed = IF_PORT_100BASEFX;
415 dev->if_port = IF_PORT_100BASEFX;
416 }
417 else {
418 *speed = IF_PORT_100BASETX;
419 dev->if_port = IF_PORT_100BASETX;
420 }
421 }
422 else {
423 *speed = IF_PORT_10BASET;
424 dev->if_port = IF_PORT_10BASET;
425 }
426
427 }
428 else {
429 *link = 0;
430 *speed = 0;
431 dev->if_port = IF_PORT_UNKNOWN;
432 }
433 return 0;
434}
435
436int lxt971a_init(struct net_device *dev, int phy_addr)
437{
438 if (au1000_debug > 4)
439 printk("lxt971a_init\n");
440
441 /* restart auto-negotiation */
442 mdio_write(dev, phy_addr, MII_CONTROL,
443 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
444
445 /* set up LEDs to correct display */
446 mdio_write(dev, phy_addr, 20, 0x0422);
447
448 if (au1000_debug > 4)
449 dump_mii(dev, phy_addr);
450 return 0;
451}
452
453int lxt971a_reset(struct net_device *dev, int phy_addr)
454{
455 s16 mii_control, timeout;
456
457 if (au1000_debug > 4) {
458 printk("lxt971a_reset\n");
459 dump_mii(dev, phy_addr);
460 }
461
462 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
463 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
464 mdelay(1);
465 for (timeout = 100; timeout > 0; --timeout) {
466 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
467 if ((mii_control & MII_CNTL_RESET) == 0)
468 break;
469 mdelay(1);
470 }
471 if (mii_control & MII_CNTL_RESET) {
472 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
473 return -1;
474 }
475 return 0;
476}
477
478int
479lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
480{
481 u16 mii_data;
482 struct au1000_private *aup;
483
484 if (!dev) {
485 printk(KERN_ERR "lxt971a_status error: NULL dev\n");
486 return -1;
487 }
488 aup = (struct au1000_private *) dev->priv;
489
490 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
491 if (mii_data & MII_STAT_LINK) {
492 *link = 1;
493 mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
494 if (mii_data & MII_INTEL_PHY_STAT_SPD) {
495 if (mii_data & MII_INTEL_PHY_STAT_FDX) {
496 *speed = IF_PORT_100BASEFX;
497 dev->if_port = IF_PORT_100BASEFX;
498 }
499 else {
500 *speed = IF_PORT_100BASETX;
501 dev->if_port = IF_PORT_100BASETX;
502 }
503 }
504 else {
505 *speed = IF_PORT_10BASET;
506 dev->if_port = IF_PORT_10BASET;
507 }
508
509 }
510 else {
511 *link = 0;
512 *speed = 0;
513 dev->if_port = IF_PORT_UNKNOWN;
514 }
515 return 0;
516}
517
518int ks8995m_init(struct net_device *dev, int phy_addr)
519{
520 s16 data;
521
522// printk("ks8995m_init\n");
523 /* Stop auto-negotiation */
524 data = mdio_read(dev, phy_addr, MII_CONTROL);
525 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
526
527 /* Set advertisement to 10/100 and Half/Full duplex
528 * (full capabilities) */
529 data = mdio_read(dev, phy_addr, MII_ANADV);
530 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
531 mdio_write(dev, phy_addr, MII_ANADV, data);
532
533 /* Restart auto-negotiation */
534 data = mdio_read(dev, phy_addr, MII_CONTROL);
535 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
536 mdio_write(dev, phy_addr, MII_CONTROL, data);
537
538 if (au1000_debug > 4) dump_mii(dev, phy_addr);
539
540 return 0;
541}
542
543int ks8995m_reset(struct net_device *dev, int phy_addr)
544{
545 s16 mii_control, timeout;
546
547// printk("ks8995m_reset\n");
548 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
549 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
550 mdelay(1);
551 for (timeout = 100; timeout > 0; --timeout) {
552 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
553 if ((mii_control & MII_CNTL_RESET) == 0)
554 break;
555 mdelay(1);
556 }
557 if (mii_control & MII_CNTL_RESET) {
558 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
559 return -1;
560 }
561 return 0;
562}
563
564int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
565{
566 u16 mii_data;
567 struct au1000_private *aup;
568
569 if (!dev) {
570 printk(KERN_ERR "ks8995m_status error: NULL dev\n");
571 return -1;
572 }
573 aup = (struct au1000_private *) dev->priv;
574
575 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
576 if (mii_data & MII_STAT_LINK) {
577 *link = 1;
578 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
579 if (mii_data & MII_AUX_100) {
580 if (mii_data & MII_AUX_FDX) {
581 *speed = IF_PORT_100BASEFX;
582 dev->if_port = IF_PORT_100BASEFX;
583 }
584 else {
585 *speed = IF_PORT_100BASETX;
586 dev->if_port = IF_PORT_100BASETX;
587 }
588 }
589 else {
590 *speed = IF_PORT_10BASET;
591 dev->if_port = IF_PORT_10BASET;
592 }
593
594 }
595 else {
596 *link = 0;
597 *speed = 0;
598 dev->if_port = IF_PORT_UNKNOWN;
599 }
600 return 0;
601}
602
603int
604smsc_83C185_init (struct net_device *dev, int phy_addr)
605{
606 s16 data;
607
608 if (au1000_debug > 4)
609 printk("smsc_83C185_init\n");
610
611 /* Stop auto-negotiation */
612 data = mdio_read(dev, phy_addr, MII_CONTROL);
613 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
614
615 /* Set advertisement to 10/100 and Half/Full duplex
616 * (full capabilities) */
617 data = mdio_read(dev, phy_addr, MII_ANADV);
618 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
619 mdio_write(dev, phy_addr, MII_ANADV, data);
620
621 /* Restart auto-negotiation */
622 data = mdio_read(dev, phy_addr, MII_CONTROL);
623 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
624
625 mdio_write(dev, phy_addr, MII_CONTROL, data);
626
627 if (au1000_debug > 4) dump_mii(dev, phy_addr);
628 return 0;
629}
630
631int
632smsc_83C185_reset (struct net_device *dev, int phy_addr)
633{
634 s16 mii_control, timeout;
635
636 if (au1000_debug > 4)
637 printk("smsc_83C185_reset\n");
638
639 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
640 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
641 mdelay(1);
642 for (timeout = 100; timeout > 0; --timeout) {
643 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
644 if ((mii_control & MII_CNTL_RESET) == 0)
645 break;
646 mdelay(1);
647 }
648 if (mii_control & MII_CNTL_RESET) {
649 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
650 return -1;
651 }
652 return 0;
653}
654
655int
656smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
657{
658 u16 mii_data;
659 struct au1000_private *aup;
660
661 if (!dev) {
662 printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
663 return -1;
664 }
665
666 aup = (struct au1000_private *) dev->priv;
667 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
668
669 if (mii_data & MII_STAT_LINK) {
670 *link = 1;
671 mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
672 if (mii_data & (1<<3)) {
673 if (mii_data & (1<<4)) {
674 *speed = IF_PORT_100BASEFX;
675 dev->if_port = IF_PORT_100BASEFX;
676 }
677 else {
678 *speed = IF_PORT_100BASETX;
679 dev->if_port = IF_PORT_100BASETX;
680 }
681 }
682 else {
683 *speed = IF_PORT_10BASET;
684 dev->if_port = IF_PORT_10BASET;
685 }
686 }
687 else {
688 *link = 0;
689 *speed = 0;
690 dev->if_port = IF_PORT_UNKNOWN;
691 }
692 return 0;
693}
694
695
696#ifdef CONFIG_MIPS_BOSPORUS
697int stub_init(struct net_device *dev, int phy_addr)
698{
699 //printk("PHY stub_init\n");
700 return 0;
701}
702
703int stub_reset(struct net_device *dev, int phy_addr)
704{
705 //printk("PHY stub_reset\n");
706 return 0;
707}
708
709int
710stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
711{
712 //printk("PHY stub_status\n");
713 *link = 1;
714 /* hmmm, revisit */
715 *speed = IF_PORT_100BASEFX;
716 dev->if_port = IF_PORT_100BASEFX;
717 return 0;
718}
719#endif
720
721struct phy_ops bcm_5201_ops = {
722 bcm_5201_init,
723 bcm_5201_reset,
724 bcm_5201_status,
725};
726
727struct phy_ops am79c874_ops = {
728 am79c874_init,
729 am79c874_reset,
730 am79c874_status,
731};
732
733struct phy_ops am79c901_ops = {
734 am79c901_init,
735 am79c901_reset,
736 am79c901_status,
737};
738
739struct phy_ops lsi_80227_ops = {
740 lsi_80227_init,
741 lsi_80227_reset,
742 lsi_80227_status,
743};
744
745struct phy_ops lxt971a_ops = {
746 lxt971a_init,
747 lxt971a_reset,
748 lxt971a_status,
749};
750
751struct phy_ops ks8995m_ops = {
752 ks8995m_init,
753 ks8995m_reset,
754 ks8995m_status,
755};
756
757struct phy_ops smsc_83C185_ops = {
758 smsc_83C185_init,
759 smsc_83C185_reset,
760 smsc_83C185_status,
761};
762
763#ifdef CONFIG_MIPS_BOSPORUS
764struct phy_ops stub_ops = {
765 stub_init,
766 stub_reset,
767 stub_status,
768};
769#endif
770
771static struct mii_chip_info {
772 const char * name;
773 u16 phy_id0;
774 u16 phy_id1;
775 struct phy_ops *phy_ops;
776 int dual_phy;
777} mii_chip_table[] = {
778 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
779 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
780 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
781 {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops, 0},
782 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
783 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
784 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
785 {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
786 {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
787 {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
788#ifdef CONFIG_MIPS_BOSPORUS
789 {"Stub", 0x1234, 0x5678, &stub_ops },
790#endif
791 {0,},
792};
793
794static int mdio_read(struct net_device *dev, int phy_id, int reg)
795{
796 struct au1000_private *aup = (struct au1000_private *) dev->priv;
797 volatile u32 *mii_control_reg;
798 volatile u32 *mii_data_reg;
799 u32 timedout = 20;
800 u32 mii_control;
801
802 #ifdef CONFIG_BCM5222_DUAL_PHY
803 /* First time we probe, it's for the mac0 phy.
804 * Since we haven't determined yet that we have a dual phy,
805 * aup->mii->mii_control_reg won't be setup and we'll
806 * default to the else statement.
807 * By the time we probe for the mac1 phy, the mii_control_reg
808 * will be setup to be the address of the mac0 phy control since
809 * both phys are controlled through mac0.
810 */
811 if (aup->mii && aup->mii->mii_control_reg) {
812 mii_control_reg = aup->mii->mii_control_reg;
813 mii_data_reg = aup->mii->mii_data_reg;
814 }
815 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
816 /* assume both phys are controlled through mac0 */
817 mii_control_reg = au_macs[0]->mii->mii_control_reg;
818 mii_data_reg = au_macs[0]->mii->mii_data_reg;
819 }
820 else
821 #endif
822 {
823 /* default control and data reg addresses */
824 mii_control_reg = &aup->mac->mii_control;
825 mii_data_reg = &aup->mac->mii_data;
826 }
827
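	/* Wait for any MII transaction already in progress to finish (polled for at most ~20 ms) */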
828 while (*mii_control_reg & MAC_MII_BUSY) {
829 mdelay(1);
830 if (--timedout == 0) {
831 printk(KERN_ERR "%s: read_MII busy timeout!!\n",
832 dev->name);
833 return -1;
834 }
835 }
836
837 mii_control = MAC_SET_MII_SELECT_REG(reg) |
838 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
839
840 *mii_control_reg = mii_control;
841
842 timedout = 20;
843 while (*mii_control_reg & MAC_MII_BUSY) {
844 mdelay(1);
845 if (--timedout == 0) {
846 printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
847 dev->name);
848 return -1;
849 }
850 }
851 return (int)*mii_data_reg;
852}
853
854static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
855{
856 struct au1000_private *aup = (struct au1000_private *) dev->priv;
857 volatile u32 *mii_control_reg;
858 volatile u32 *mii_data_reg;
859 u32 timedout = 20;
860 u32 mii_control;
861
862 #ifdef CONFIG_BCM5222_DUAL_PHY
863 if (aup->mii && aup->mii->mii_control_reg) {
864 mii_control_reg = aup->mii->mii_control_reg;
865 mii_data_reg = aup->mii->mii_data_reg;
866 }
867 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
868 /* assume both phys are controlled through mac0 */
869 mii_control_reg = au_macs[0]->mii->mii_control_reg;
870 mii_data_reg = au_macs[0]->mii->mii_data_reg;
871 }
872 else
873 #endif
874 {
875 /* default control and data reg addresses */
876 mii_control_reg = &aup->mac->mii_control;
877 mii_data_reg = &aup->mac->mii_data;
878 }
879
880 while (*mii_control_reg & MAC_MII_BUSY) {
881 mdelay(1);
882 if (--timedout == 0) {
883 printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
884 dev->name);
885 return;
886 }
887 }
888
889 mii_control = MAC_SET_MII_SELECT_REG(reg) |
890 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
891
892 *mii_data_reg = value;
893 *mii_control_reg = mii_control;
894}
895
896
897static void dump_mii(struct net_device *dev, int phy_id)
898{
899 int i, val;
900
901 for (i = 0; i < 7; i++) {
902 if ((val = mdio_read(dev, phy_id, i)) >= 0)
903 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
904 }
905 for (i = 16; i < 25; i++) {
906 if ((val = mdio_read(dev, phy_id, i)) >= 0)
907 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
908 }
909}
910
911static int mii_probe (struct net_device * dev)
912{
913 struct au1000_private *aup = (struct au1000_private *) dev->priv;
914 int phy_addr;
915#ifdef CONFIG_MIPS_BOSPORUS
916 int phy_found=0;
917#endif
918
919 /* search for total of 32 possible mii phy addresses */
920 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
921 u16 mii_status;
922 u16 phy_id0, phy_id1;
923 int i;
924
925 #ifdef CONFIG_BCM5222_DUAL_PHY
926 /* Mask the already found phy, try next one */
927 if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
928 if (au_macs[0]->phy_addr == phy_addr)
929 continue;
930 }
931 #endif
932
933 mii_status = mdio_read(dev, phy_addr, MII_STATUS);
934 if (mii_status == 0xffff || mii_status == 0x0000)
935 /* the mii is not accessible, try next one */
936 continue;
937
938 phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
939 phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
940
941 /* search our mii table for the current mii */
942 for (i = 0; mii_chip_table[i].phy_id1; i++) {
943 if (phy_id0 == mii_chip_table[i].phy_id0 &&
944 phy_id1 == mii_chip_table[i].phy_id1) {
945 struct mii_phy * mii_phy = aup->mii;
946
947 printk(KERN_INFO "%s: %s at phy address %d\n",
948 dev->name, mii_chip_table[i].name,
949 phy_addr);
950#ifdef CONFIG_MIPS_BOSPORUS
951 phy_found = 1;
952#endif
953 mii_phy->chip_info = mii_chip_table+i;
954 aup->phy_addr = phy_addr;
955 aup->want_autoneg = 1;
956 aup->phy_ops = mii_chip_table[i].phy_ops;
957 aup->phy_ops->phy_init(dev,phy_addr);
958
959 // Check for dual-phy and then store required
960 // values and set indicators. We need to do
961 // this now since mdio_{read,write} need the
962 // control and data register addresses.
963 #ifdef CONFIG_BCM5222_DUAL_PHY
964 if ( mii_chip_table[i].dual_phy) {
965
966 /* assume both phys are controlled
967 * through MAC0. Board specific? */
968
969 /* sanity check */
970 if (!au_macs[0] || !au_macs[0]->mii)
971 return -1;
972 aup->mii->mii_control_reg = (u32 *)
973 &au_macs[0]->mac->mii_control;
974 aup->mii->mii_data_reg = (u32 *)
975 &au_macs[0]->mac->mii_data;
976 }
977 #endif
978 goto found;
979 }
980 }
981 }
982found:
983
984#ifdef CONFIG_MIPS_BOSPORUS
985 /* This is a workaround for the Micrel/Kendin 5 port switch
986 The second MAC doesn't see a PHY connected... so we need to
987 trick it into thinking we have one.
988
989 If this kernel is run on another Au1500 development board
990 the stub will be found as well as the actual PHY. However,
991 the last found PHY will be used... usually at Addr 31 (Db1500).
992 */
993 if ( (!phy_found) )
994 {
995 u16 phy_id0, phy_id1;
996 int i;
997
998 phy_id0 = 0x1234;
999 phy_id1 = 0x5678;
1000
1001 /* search our mii table for the current mii */
1002 for (i = 0; mii_chip_table[i].phy_id1; i++) {
1003 if (phy_id0 == mii_chip_table[i].phy_id0 &&
1004 phy_id1 == mii_chip_table[i].phy_id1) {
1005 struct mii_phy * mii_phy;
1006
1007 printk(KERN_INFO "%s: %s at phy address %d\n",
1008 dev->name, mii_chip_table[i].name,
1009 phy_addr);
1010 mii_phy = kmalloc(sizeof(struct mii_phy),
1011 GFP_KERNEL);
1012 if (mii_phy) {
1013 mii_phy->chip_info = mii_chip_table+i;
1014 aup->phy_addr = phy_addr;
1015 mii_phy->next = aup->mii;
1016 aup->phy_ops =
1017 mii_chip_table[i].phy_ops;
1018 aup->mii = mii_phy;
1019 aup->phy_ops->phy_init(dev,phy_addr);
1020 } else {
1021 printk(KERN_ERR "%s: out of memory\n",
1022 dev->name);
1023 return -1;
1024 }
1025 mii_phy->chip_info = mii_chip_table+i;
1026 aup->phy_addr = phy_addr;
1027 aup->phy_ops = mii_chip_table[i].phy_ops;
1028 aup->phy_ops->phy_init(dev,phy_addr);
1029 break;
1030 }
1031 }
1032 }
1033 if (aup->mac_id == 0) {
1034 /* the Bosporus phy responds to addresses 0-5 but
1035 * 5 is the correct one.
1036 */
1037 aup->phy_addr = 5;
1038 }
1039#endif
1040
1041 if (aup->mii->chip_info == NULL) {
1042 printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
1043 dev->name);
1044 return -1;
1045 }
1046
1047 printk(KERN_INFO "%s: Using %s as default\n",
1048 dev->name, aup->mii->chip_info->name);
1049
1050 return 0;
1051}
1052
1053
1054/*
1055 * Buffer allocation/deallocation routines. The buffer descriptor returned
1056 * has the virtual and dma address of a buffer suitable for
1057 * both, receive and transmit operations.
1058 */
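/* Free buffer descriptors are kept on a singly linked list headed by aup->pDBfree. */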
1059static db_dest_t *GetFreeDB(struct au1000_private *aup)
1060{
1061 db_dest_t *pDB;
1062 pDB = aup->pDBfree;
1063
1064 if (pDB) {
1065 aup->pDBfree = pDB->pnext;
1066 }
1067 return pDB;
1068}
1069
1070void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
1071{
1072 db_dest_t *pDBfree = aup->pDBfree;
1073 if (pDBfree)
1074 pDBfree->pnext = pDB;
1075 aup->pDBfree = pDB;
1076}
1077
1078static void enable_rx_tx(struct net_device *dev)
1079{
1080 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1081
1082 if (au1000_debug > 4)
1083 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
1084
1085 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
1086 au_sync_delay(10);
1087}
1088
1089static void hard_stop(struct net_device *dev)
1090{
1091 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1092
1093 if (au1000_debug > 4)
1094 printk(KERN_INFO "%s: hard stop\n", dev->name);
1095
1096 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
1097 au_sync_delay(10);
1098}
1099
1100
1101static void reset_mac(struct net_device *dev)
1102{
1103 int i;
1104 u32 flags;
1105 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1106
1107 if (au1000_debug > 4)
1108 printk(KERN_INFO "%s: reset mac, aup %x\n",
1109 dev->name, (unsigned)aup);
1110
1111 spin_lock_irqsave(&aup->lock, flags);
1112 if (aup->timer.function == &au1000_timer) {/* check if timer initted */
1113 del_timer(&aup->timer);
1114 }
1115
1116 hard_stop(dev);
1117 #ifdef CONFIG_BCM5222_DUAL_PHY
1118 if (aup->mac_id != 0) {
1119 #endif
1120 /* If BCM5222, we can't leave MAC0 in reset because then
1121 * we can't access the dual phy for ETH1 */
1122 *aup->enable = MAC_EN_CLOCK_ENABLE;
1123 au_sync_delay(2);
1124 *aup->enable = 0;
1125 au_sync_delay(2);
1126 #ifdef CONFIG_BCM5222_DUAL_PHY
1127 }
1128 #endif
1129 aup->tx_full = 0;
1130 for (i = 0; i < NUM_RX_DMA; i++) {
1131 /* reset control bits */
1132 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
1133 }
1134 for (i = 0; i < NUM_TX_DMA; i++) {
1135 /* reset control bits */
1136 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
1137 }
1138 spin_unlock_irqrestore(&aup->lock, flags);
1139}
1140
1141
1142/*
1143 * Setup the receive and transmit "rings". These pointers are the addresses
1144 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
1145 * these are not descriptors sitting in memory.
1146 */
1147static void
1148setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
1149{
1150 int i;
1151
1152 for (i = 0; i < NUM_RX_DMA; i++) {
1153 aup->rx_dma_ring[i] =
1154 (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
1155 }
1156 for (i = 0; i < NUM_TX_DMA; i++) {
1157 aup->tx_dma_ring[i] =
1158 (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
1159 }
1160}
1161
1162static struct {
1163 u32 base_addr;
1164 u32 macen_addr;
1165 int irq;
1166 struct net_device *dev;
1167} iflist[2] = {
1168#ifdef CONFIG_SOC_AU1000
1169 {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
1170 {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
1171#endif
1172#ifdef CONFIG_SOC_AU1100
1173 {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
1174#endif
1175#ifdef CONFIG_SOC_AU1500
1176 {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
1177 {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
1178#endif
1179#ifdef CONFIG_SOC_AU1550
1180 {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
1181 {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
1182#endif
1183};
1184
1185static int num_ifs;
1186
1187/*
1188 * Setup the base address and interrupt of the Au1xxx ethernet macs
1189 * based on cpu type and whether the interface is enabled in sys_pinfunc
1190 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1191 */
1192static int __init au1000_init_module(void)
1193{
1194 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1195 struct net_device *dev;
1196 int i, found_one = 0;
1197
1198 num_ifs = NUM_ETH_INTERFACES - ni;
1199
1200 for (i = 0; i < num_ifs; i++) {
1201 dev = au1000_probe(i);
1202 iflist[i].dev = dev;
1203 if (dev)
1204 found_one++;
1205 }
1206 if (!found_one)
1207 return -ENODEV;
1208 return 0;
1209}
1210
1211static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
1212{
1213 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1214 u16 ctl, adv;
1215
1216 /* Setup standard advertise */
1217 adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
1218 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1219 if (advertise & ADVERTISED_10baseT_Half)
1220 adv |= ADVERTISE_10HALF;
1221 if (advertise & ADVERTISED_10baseT_Full)
1222 adv |= ADVERTISE_10FULL;
1223 if (advertise & ADVERTISED_100baseT_Half)
1224 adv |= ADVERTISE_100HALF;
1225 if (advertise & ADVERTISED_100baseT_Full)
1226 adv |= ADVERTISE_100FULL;
1227 mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
1228
1229 /* Start/Restart aneg */
1230 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1231 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1232 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1233
1234 return 0;
1235}
1236
1237static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
1238{
1239 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1240 u16 ctl;
1241
1242 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1243 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
1244
1245 /* First reset the PHY */
1246 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
1247
1248 /* Select speed & duplex */
1249 switch (speed) {
1250 case SPEED_10:
1251 break;
1252 case SPEED_100:
1253 ctl |= BMCR_SPEED100;
1254 break;
1255 case SPEED_1000:
1256 default:
1257 return -EINVAL;
1258 }
1259 if (fd == DUPLEX_FULL)
1260 ctl |= BMCR_FULLDPLX;
1261 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1262
1263 return 0;
1264}
1265
1266
1267static void
1268au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
1269{
1270 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1271 u32 advertise;
1272 int autoneg;
1273 int forced_speed;
1274 int forced_duplex;
1275
1276 /* Default advertise */
1277 advertise = GENMII_DEFAULT_ADVERTISE;
1278 autoneg = aup->want_autoneg;
1279 forced_speed = SPEED_100;
1280 forced_duplex = DUPLEX_FULL;
1281
1282 /* Setup link parameters */
1283 if (cmd) {
1284 if (cmd->autoneg == AUTONEG_ENABLE) {
1285 advertise = cmd->advertising;
1286 autoneg = 1;
1287 } else {
1288 autoneg = 0;
1289
1290 forced_speed = cmd->speed;
1291 forced_duplex = cmd->duplex;
1292 }
1293 }
1294
1295 /* Configure PHY & start aneg */
1296 aup->want_autoneg = autoneg;
1297 if (autoneg)
1298 au1000_setup_aneg(dev, advertise);
1299 else
1300 au1000_setup_forced(dev, forced_speed, forced_duplex);
1301 mod_timer(&aup->timer, jiffies + HZ);
1302}
1303
1304static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1305{
1306 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1307 u16 link, speed;
1308
1309 cmd->supported = GENMII_DEFAULT_FEATURES;
1310 cmd->advertising = GENMII_DEFAULT_ADVERTISE;
1311 cmd->port = PORT_MII;
1312 cmd->transceiver = XCVR_EXTERNAL;
1313 cmd->phy_address = aup->phy_addr;
1314 spin_lock_irq(&aup->lock);
1315 cmd->autoneg = aup->want_autoneg;
1316 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1317 if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
1318 cmd->speed = SPEED_100;
1319 else if (speed == IF_PORT_10BASET)
1320 cmd->speed = SPEED_10;
1321 if (link && (dev->if_port == IF_PORT_100BASEFX))
1322 cmd->duplex = DUPLEX_FULL;
1323 else
1324 cmd->duplex = DUPLEX_HALF;
1325 spin_unlock_irq(&aup->lock);
1326 return 0;
1327}
1328
1329static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1330{
1331 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1332 unsigned long features = GENMII_DEFAULT_FEATURES;
1333
1334 if (!capable(CAP_NET_ADMIN))
1335 return -EPERM;
1336
1337 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1338 return -EINVAL;
1339 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1340 return -EINVAL;
1341 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1342 return -EINVAL;
1343 if (cmd->autoneg == AUTONEG_DISABLE)
1344 switch (cmd->speed) {
1345 case SPEED_10:
1346 if (cmd->duplex == DUPLEX_HALF &&
1347 (features & SUPPORTED_10baseT_Half) == 0)
1348 return -EINVAL;
1349 if (cmd->duplex == DUPLEX_FULL &&
1350 (features & SUPPORTED_10baseT_Full) == 0)
1351 return -EINVAL;
1352 break;
1353 case SPEED_100:
1354 if (cmd->duplex == DUPLEX_HALF &&
1355 (features & SUPPORTED_100baseT_Half) == 0)
1356 return -EINVAL;
1357 if (cmd->duplex == DUPLEX_FULL &&
1358 (features & SUPPORTED_100baseT_Full) == 0)
1359 return -EINVAL;
1360 break;
1361 default:
1362 return -EINVAL;
1363 }
1364 else if ((features & SUPPORTED_Autoneg) == 0)
1365 return -EINVAL;
1366
1367 spin_lock_irq(&aup->lock);
1368 au1000_start_link(dev, cmd);
1369 spin_unlock_irq(&aup->lock);
1370 return 0;
1371}
1372
1373static int au1000_nway_reset(struct net_device *dev)
1374{
1375 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1376
1377 if (!aup->want_autoneg)
1378 return -EINVAL;
1379 spin_lock_irq(&aup->lock);
1380 au1000_start_link(dev, NULL);
1381 spin_unlock_irq(&aup->lock);
1382 return 0;
1383}
1384
1385static void
1386au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1387{
1388 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1389
1390 strcpy(info->driver, DRV_NAME);
1391 strcpy(info->version, DRV_VERSION);
1392 info->fw_version[0] = '\0';
1393 sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
1394 info->regdump_len = 0;
1395}
1396
1397static u32 au1000_get_link(struct net_device *dev)
1398{
1399 return netif_carrier_ok(dev);
1400}
1401
1402static struct ethtool_ops au1000_ethtool_ops = {
1403 .get_settings = au1000_get_settings,
1404 .set_settings = au1000_set_settings,
1405 .get_drvinfo = au1000_get_drvinfo,
1406 .nway_reset = au1000_nway_reset,
1407 .get_link = au1000_get_link
1408};
1409
1410static struct net_device * au1000_probe(int port_num)
1411{
1412 static unsigned version_printed = 0;
1413 struct au1000_private *aup = NULL;
1414 struct net_device *dev = NULL;
1415 db_dest_t *pDB, *pDBfree;
1416 char *pmac, *argptr;
1417 char ethaddr[6];
1418 int irq, i, err;
1419 u32 base, macen;
1420
1421 if (port_num >= NUM_ETH_INTERFACES)
1422 return NULL;
1423
1424 base = CPHYSADDR(iflist[port_num].base_addr );
1425 macen = CPHYSADDR(iflist[port_num].macen_addr);
1426 irq = iflist[port_num].irq;
1427
1428 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
1429 !request_mem_region(macen, 4, "Au1x00 ENET"))
1430 return NULL;
1431
1432 if (version_printed++ == 0)
1433 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1434
1435 dev = alloc_etherdev(sizeof(struct au1000_private));
1436 if (!dev) {
1437 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
1438 return NULL;
1439 }
1440
1441 if ((err = register_netdev(dev)) != 0) {
1442 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1443 DRV_NAME, err);
1444 free_netdev(dev);
1445 return NULL;
1446 }
1447
1448 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
1449 dev->name, base, irq);
1450
1451 aup = dev->priv;
1452
1453 /* Allocate the data buffers */
1454 /* Snooping works fine with eth on all au1xxx */
1455 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1456 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1457 &aup->dma_addr, 0);
1458 if (!aup->vaddr) {
1459 free_netdev(dev);
1460 release_mem_region( base, MAC_IOSIZE);
1461 release_mem_region(macen, 4);
1462 return NULL;
1463 }
1464
1465 /* aup->mac is the base address of the MAC's registers */
1466 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
1467
1468 /* Setup some variables for quick register address access */
1469 aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
1470 aup->mac_id = port_num;
1471 au_macs[port_num] = aup;
1472
1473 if (port_num == 0) {
1474 /* Check the environment variables first */
1475 if (get_ethernet_addr(ethaddr) == 0)
1476 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1477 else {
1478 /* Check command line */
1479 argptr = prom_getcmdline();
1480 if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
1481 printk(KERN_INFO "%s: No MAC address found\n",
1482 dev->name);
1483 /* Use the hard coded MAC addresses */
1484 else {
1485 str2eaddr(ethaddr, pmac + strlen("ethaddr="));
1486 memcpy(au1000_mac_addr, ethaddr,
1487 sizeof(au1000_mac_addr));
1488 }
1489 }
1490
1491 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1492 } else if (port_num == 1)
1493 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1494
1495 /*
1496 * Assign to the Ethernet ports two consecutive MAC addresses
1497 * to match those that are printed on their stickers
1498 */
1499 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1500 dev->dev_addr[5] += port_num;
1501
1502 /* Bring the device out of reset, otherwise probing the MII will hang */
1503 *aup->enable = MAC_EN_CLOCK_ENABLE;
1504 au_sync_delay(2);
1505 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 |
1506 MAC_EN_CLOCK_ENABLE;
1507 au_sync_delay(2);
1508
1509 aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
1510 if (!aup->mii) {
1511 printk(KERN_ERR "%s: out of memory\n", dev->name);
1512 goto err_out;
1513 }
1514 aup->mii->next = NULL;
1515 aup->mii->chip_info = NULL;
1516 aup->mii->status = 0;
1517 aup->mii->mii_control_reg = 0;
1518 aup->mii->mii_data_reg = 0;
1519
1520 if (mii_probe(dev) != 0) {
1521 goto err_out;
1522 }
1523
1524 pDBfree = NULL;
1525 /* setup the data buffer descriptors and attach a buffer to each one */
1526 pDB = aup->db;
1527 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1528 pDB->pnext = pDBfree;
1529 pDBfree = pDB;
1530 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1531 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1532 pDB++;
1533 }
1534 aup->pDBfree = pDBfree;
1535
1536 for (i = 0; i < NUM_RX_DMA; i++) {
1537 pDB = GetFreeDB(aup);
1538 if (!pDB) {
1539 goto err_out;
1540 }
1541 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1542 aup->rx_db_inuse[i] = pDB;
1543 }
1544 for (i = 0; i < NUM_TX_DMA; i++) {
1545 pDB = GetFreeDB(aup);
1546 if (!pDB) {
1547 goto err_out;
1548 }
1549 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1550 aup->tx_dma_ring[i]->len = 0;
1551 aup->tx_db_inuse[i] = pDB;
1552 }
1553
1554 spin_lock_init(&aup->lock);
1555 dev->base_addr = base;
1556 dev->irq = irq;
1557 dev->open = au1000_open;
1558 dev->hard_start_xmit = au1000_tx;
1559 dev->stop = au1000_close;
1560 dev->get_stats = au1000_get_stats;
1561 dev->set_multicast_list = &set_rx_mode;
1562 dev->do_ioctl = &au1000_ioctl;
1563 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1564 dev->set_config = &au1000_set_config;
1565 dev->tx_timeout = au1000_tx_timeout;
1566 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1567
1568 /*
1569 * The boot code uses the ethernet controller, so reset it to start
1570 * fresh. au1000_init() expects that the device is in reset state.
1571 */
1572 reset_mac(dev);
1573
1574 return dev;
1575
1576err_out:
1577 /* here we should have a valid dev plus aup-> register addresses
1578 * so we can reset the mac properly.*/
1579 reset_mac(dev);
1580 kfree(aup->mii);
1581 for (i = 0; i < NUM_RX_DMA; i++) {
1582 if (aup->rx_db_inuse[i])
1583 ReleaseDB(aup, aup->rx_db_inuse[i]);
1584 }
1585 for (i = 0; i < NUM_TX_DMA; i++) {
1586 if (aup->tx_db_inuse[i])
1587 ReleaseDB(aup, aup->tx_db_inuse[i]);
1588 }
1589 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1590 (void *)aup->vaddr, aup->dma_addr);
1591 unregister_netdev(dev);
1592 free_netdev(dev);
1593 release_mem_region( base, MAC_IOSIZE);
1594 release_mem_region(macen, 4);
1595 return NULL;
1596}
1597
1598/*
1599 * Initialize the interface.
1600 *
1601 * When the device powers up, the clocks are disabled and the
1602 * mac is in reset state. When the interface is closed, we
1603 * do the same -- reset the device and disable the clocks to
1604 * conserve power. Thus, whenever au1000_init() is called,
1605 * the device should already be in reset state.
1606 */
1607static int au1000_init(struct net_device *dev)
1608{
1609 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1610 u32 flags;
1611 int i;
1612 u32 control;
1613 u16 link, speed;
1614
1615 if (au1000_debug > 4)
1616 printk("%s: au1000_init\n", dev->name);
1617
1618 spin_lock_irqsave(&aup->lock, flags);
1619
1620 /* bring the device out of reset */
1621 *aup->enable = MAC_EN_CLOCK_ENABLE;
1622 au_sync_delay(2);
1623 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1624 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1625 au_sync_delay(20);
1626
1627 aup->mac->control = 0;
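	/* Start the software ring indices at the descriptor position (0-3) encoded in bits 3:2 of buff_stat */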
1628 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
1629 aup->tx_tail = aup->tx_head;
1630 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
1631
1632 aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
1633 aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
1634 dev->dev_addr[1]<<8 | dev->dev_addr[0];
1635
1636 for (i = 0; i < NUM_RX_DMA; i++) {
1637 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
1638 }
1639 au_sync();
1640
1641 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1642 control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
1643#ifndef CONFIG_CPU_LITTLE_ENDIAN
1644 control |= MAC_BIG_ENDIAN;
1645#endif
1646 if (link && (dev->if_port == IF_PORT_100BASEFX)) {
1647 control |= MAC_FULL_DUPLEX;
1648 }
1649
1650 aup->mac->control = control;
1651 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1652 au_sync();
1653
1654 spin_unlock_irqrestore(&aup->lock, flags);
1655 return 0;
1656}
1657
1658static void au1000_timer(unsigned long data)
1659{
1660 struct net_device *dev = (struct net_device *)data;
1661 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1662 unsigned char if_port;
1663 u16 link, speed;
1664
1665 if (!dev) {
1666 /* fatal error, don't restart the timer */
1667 printk(KERN_ERR "au1000_timer error: NULL dev\n");
1668 return;
1669 }
1670
1671 if_port = dev->if_port;
1672 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1673 if (link) {
1674 if (!netif_carrier_ok(dev)) {
1675 netif_carrier_on(dev);
1676 printk(KERN_INFO "%s: link up\n", dev->name);
1677 }
1678 }
1679 else {
1680 if (netif_carrier_ok(dev)) {
1681 netif_carrier_off(dev);
1682 dev->if_port = 0;
1683 printk(KERN_INFO "%s: link down\n", dev->name);
1684 }
1685 }
1686 }
1687
1688 if (link && (dev->if_port != if_port) &&
1689 (dev->if_port != IF_PORT_UNKNOWN)) {
1690 hard_stop(dev);
1691 if (dev->if_port == IF_PORT_100BASEFX) {
1692 printk(KERN_INFO "%s: going to full duplex\n",
1693 dev->name);
1694 aup->mac->control |= MAC_FULL_DUPLEX;
1695 au_sync_delay(1);
1696 }
1697 else {
1698 aup->mac->control &= ~MAC_FULL_DUPLEX;
1699 au_sync_delay(1);
1700 }
1701 enable_rx_tx(dev);
1702 }
1703
1704 aup->timer.expires = RUN_AT((1*HZ));
1705 aup->timer.data = (unsigned long)dev;
1706 aup->timer.function = &au1000_timer; /* timer handler */
1707 add_timer(&aup->timer);
1708
1709}
1710
1711static int au1000_open(struct net_device *dev)
1712{
1713 int retval;
1714 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1715
1716 if (au1000_debug > 4)
1717 printk("%s: open: dev=%p\n", dev->name, dev);
1718
1719 if ((retval = au1000_init(dev))) {
1720 printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
1721 free_irq(dev->irq, dev);
1722 return retval;
1723 }
1724 netif_start_queue(dev);
1725
1726 if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
1727 dev->name, dev))) {
1728 printk(KERN_ERR "%s: unable to get IRQ %d\n",
1729 dev->name, dev->irq);
1730 return retval;
1731 }
1732
1733 init_timer(&aup->timer); /* used in ioctl() */
1734 aup->timer.expires = RUN_AT((3*HZ));
1735 aup->timer.data = (unsigned long)dev;
1736 aup->timer.function = &au1000_timer; /* timer handler */
1737 add_timer(&aup->timer);
1738
1739 if (au1000_debug > 4)
1740 printk("%s: open: Initialization done.\n", dev->name);
1741
1742 return 0;
1743}
1744
1745static int au1000_close(struct net_device *dev)
1746{
1747 u32 flags;
1748 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1749
1750 if (au1000_debug > 4)
1751 printk("%s: close: dev=%p\n", dev->name, dev);
1752
1753 reset_mac(dev);
1754
1755 spin_lock_irqsave(&aup->lock, flags);
1756
1757 /* stop the device */
1758 netif_stop_queue(dev);
1759
1760 /* disable the interrupt */
1761 free_irq(dev->irq, dev);
1762 spin_unlock_irqrestore(&aup->lock, flags);
1763
1764 return 0;
1765}
1766
1767static void __exit au1000_cleanup_module(void)
1768{
1769 int i, j;
1770 struct net_device *dev;
1771 struct au1000_private *aup;
1772
1773 for (i = 0; i < num_ifs; i++) {
1774 dev = iflist[i].dev;
1775 if (dev) {
1776 aup = (struct au1000_private *) dev->priv;
1777 unregister_netdev(dev);
1778 kfree(aup->mii);
1779 for (j = 0; j < NUM_RX_DMA; j++)
1780 if (aup->rx_db_inuse[j])
1781 ReleaseDB(aup, aup->rx_db_inuse[j]);
1782 for (j = 0; j < NUM_TX_DMA; j++)
1783 if (aup->tx_db_inuse[j])
1784 ReleaseDB(aup, aup->tx_db_inuse[j]);
1785 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1786 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1787 (void *)aup->vaddr, aup->dma_addr);
1788 release_mem_region(dev->base_addr, MAC_IOSIZE);
1789 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1790 free_netdev(dev);
1791 }
1792 }
1793}
1794
1795static void update_tx_stats(struct net_device *dev, u32 status)
1796{
1797 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1798 struct net_device_stats *ps = &aup->stats;
1799
1800 if (status & TX_FRAME_ABORTED) {
1801 if (dev->if_port == IF_PORT_100BASEFX) {
1802 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1803 /* any other tx errors are only valid
1804 * in half duplex mode */
1805 ps->tx_errors++;
1806 ps->tx_aborted_errors++;
1807 }
1808 }
1809 else {
1810 ps->tx_errors++;
1811 ps->tx_aborted_errors++;
1812 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1813 ps->tx_carrier_errors++;
1814 }
1815 }
1816}
1817
1818
1819/*
1820 * Called from the interrupt service routine to acknowledge
1821 * the TX DONE bits. This is a must if the irq is setup as
1822 * edge triggered.
1823 */
1824static void au1000_tx_ack(struct net_device *dev)
1825{
1826 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1827 volatile tx_dma_t *ptxd;
1828
1829 ptxd = aup->tx_dma_ring[aup->tx_tail];
1830
1831 while (ptxd->buff_stat & TX_T_DONE) {
1832 update_tx_stats(dev, ptxd->status);
1833 ptxd->buff_stat &= ~TX_T_DONE;
1834 ptxd->len = 0;
1835 au_sync();
1836
1837 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1838 ptxd = aup->tx_dma_ring[aup->tx_tail];
1839
1840 if (aup->tx_full) {
1841 aup->tx_full = 0;
1842 netif_wake_queue(dev);
1843 }
1844 }
1845}
1846
1847
1848/*
1849 * Au1000 transmit routine.
1850 */
1851static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1852{
1853 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1854 struct net_device_stats *ps = &aup->stats;
1855 volatile tx_dma_t *ptxd;
1856 u32 buff_stat;
1857 db_dest_t *pDB;
1858 int i;
1859
1860 if (au1000_debug > 5)
1861 printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
1862 dev->name, (unsigned)aup, skb->len,
1863 skb->data, aup->tx_head);
1864
1865 ptxd = aup->tx_dma_ring[aup->tx_head];
1866 buff_stat = ptxd->buff_stat;
1867 if (buff_stat & TX_DMA_ENABLE) {
1868 /* We've wrapped around and the transmitter is still busy */
1869 netif_stop_queue(dev);
1870 aup->tx_full = 1;
1871 return 1;
1872 }
1873 else if (buff_stat & TX_T_DONE) {
1874 update_tx_stats(dev, ptxd->status);
1875 ptxd->len = 0;
1876 }
1877
1878 if (aup->tx_full) {
1879 aup->tx_full = 0;
1880 netif_wake_queue(dev);
1881 }
1882
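	/* Copy the frame into this slot's preallocated DMA buffer; frames shorter than ETH_ZLEN are zero-padded to the 60-byte minimum */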
1883 pDB = aup->tx_db_inuse[aup->tx_head];
1884 memcpy((void *)pDB->vaddr, skb->data, skb->len);
1885 if (skb->len < ETH_ZLEN) {
1886 for (i=skb->len; i<ETH_ZLEN; i++) {
1887 ((char *)pDB->vaddr)[i] = 0;
1888 }
1889 ptxd->len = ETH_ZLEN;
1890 }
1891 else
1892 ptxd->len = skb->len;
1893
1894 ps->tx_packets++;
1895 ps->tx_bytes += ptxd->len;
1896
1897 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1898 au_sync();
1899 dev_kfree_skb(skb);
1900 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1901 dev->trans_start = jiffies;
1902 return 0;
1903}
1904
1905static inline void update_rx_stats(struct net_device *dev, u32 status)
1906{
1907 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1908 struct net_device_stats *ps = &aup->stats;
1909
1910 ps->rx_packets++;
1911 if (status & RX_MCAST_FRAME)
1912 ps->multicast++;
1913
1914 if (status & RX_ERROR) {
1915 ps->rx_errors++;
1916 if (status & RX_MISSED_FRAME)
1917 ps->rx_missed_errors++;
1918 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
1919 ps->rx_length_errors++;
1920 if (status & RX_CRC_ERROR)
1921 ps->rx_crc_errors++;
1922 if (status & RX_COLL)
1923 ps->collisions++;
1924 }
1925 else
1926 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1927
1928}
1929
1930/*
1931 * Au1000 receive routine.
1932 */
1933static int au1000_rx(struct net_device *dev)
1934{
1935 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1936 struct sk_buff *skb;
1937 volatile rx_dma_t *prxd;
1938 u32 buff_stat, status;
1939 db_dest_t *pDB;
1940 u32 frmlen;
1941
1942 if (au1000_debug > 5)
1943 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1944
1945 prxd = aup->rx_dma_ring[aup->rx_head];
1946 buff_stat = prxd->buff_stat;
1947 while (buff_stat & RX_T_DONE) {
1948 status = prxd->status;
1949 pDB = aup->rx_db_inuse[aup->rx_head];
1950 update_rx_stats(dev, status);
1951 if (!(status & RX_ERROR)) {
1952
1953 /* good frame */
1954 frmlen = (status & RX_FRAME_LEN_MASK);
1955 frmlen -= 4; /* Remove FCS */
1956 skb = dev_alloc_skb(frmlen + 2);
1957 if (skb == NULL) {
1958 printk(KERN_ERR
1959 "%s: Memory squeeze, dropping packet.\n",
1960 dev->name);
1961 aup->stats.rx_dropped++;
1962 continue;
1963 }
1964 skb->dev = dev;
1965 skb_reserve(skb, 2); /* 16 byte IP header align */
1966 eth_copy_and_sum(skb,
1967 (unsigned char *)pDB->vaddr, frmlen, 0);
1968 skb_put(skb, frmlen);
1969 skb->protocol = eth_type_trans(skb, dev);
1970 netif_rx(skb); /* pass the packet to upper layers */
1971 }
1972 else {
1973 if (au1000_debug > 4) {
1974 if (status & RX_MISSED_FRAME)
1975 printk("rx miss\n");
1976 if (status & RX_WDOG_TIMER)
1977 printk("rx wdog\n");
1978 if (status & RX_RUNT)
1979 printk("rx runt\n");
1980 if (status & RX_OVERLEN)
1981 printk("rx overlen\n");
1982 if (status & RX_COLL)
1983 printk("rx coll\n");
1984 if (status & RX_MII_ERROR)
1985 printk("rx mii error\n");
1986 if (status & RX_CRC_ERROR)
1987 printk("rx crc error\n");
1988 if (status & RX_LEN_ERROR)
1989 printk("rx len error\n");
1990 if (status & RX_U_CNTRL_FRAME)
1991 printk("rx u control frame\n");
1994 }
1995 }
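		/* Return the buffer to the receiver and advance to the
		 * next ring slot. */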
1996 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
1997 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
1998 au_sync();
1999
2000 /* next descriptor */
2001 prxd = aup->rx_dma_ring[aup->rx_head];
2002 buff_stat = prxd->buff_stat;
2003 dev->last_rx = jiffies;
2004 }
2005 return 0;
2006}
2007
2008
2009/*
2010 * Au1000 interrupt service routine.
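 * Drains the receive ring first to reduce the chance of an overrun,
 * then acknowledges any completed transmits.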
2011 */
2012static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2013{
2014 struct net_device *dev = (struct net_device *) dev_id;
2015
2016 if (dev == NULL) {
2017 printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
2018 return IRQ_NONE;
2019 }
2020
2021 /* Handle RX interrupts first to minimize chance of overrun */
2022
2023 au1000_rx(dev);
2024 au1000_tx_ack(dev);
2025 return IRQ_RETVAL(1);
2026}
2027
2028
2029/*
2030 * The Tx ring has been full longer than the watchdog timeout
2031 * value; the transmitter is probably hung, so reset and reinitialize the MAC.
2032 */
2033static void au1000_tx_timeout(struct net_device *dev)
2034{
2035 printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
2036 reset_mac(dev);
2037 au1000_init(dev);
2038 dev->trans_start = jiffies;
2039 netif_wake_queue(dev);
2040}
2041
2042static void set_rx_mode(struct net_device *dev)
2043{
2044 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2045
2046 if (au1000_debug > 4)
2047 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
2048
2049 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2050 aup->mac->control |= MAC_PROMISCUOUS;
2051 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2052 } else if ((dev->flags & IFF_ALLMULTI) ||
2053 dev->mc_count > MULTICAST_FILTER_LIMIT) {
2054 aup->mac->control |= MAC_PASS_ALL_MULTI;
2055 aup->mac->control &= ~MAC_PROMISCUOUS;
2056 printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
2057 } else {
2058 int i;
2059 struct dev_mc_list *mclist;
2060 u32 mc_filter[2]; /* Multicast hash filter */
2061
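		/* Build the 64-bit multicast hash filter: the top six bits
		 * of the CRC-32 of each address select one bit in the
		 * filter. */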
2062 mc_filter[1] = mc_filter[0] = 0;
2063 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2064 i++, mclist = mclist->next) {
2065 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
2066 (long *)mc_filter);
2067 }
2068 aup->mac->multi_hash_high = mc_filter[1];
2069 aup->mac->multi_hash_low = mc_filter[0];
2070 aup->mac->control &= ~MAC_PROMISCUOUS;
2071 aup->mac->control |= MAC_HASH_MODE;
2072 }
2073}
2074
2075
2076static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2077{
2078 struct au1000_private *aup = (struct au1000_private *)dev->priv;
2079 u16 *data = (u16 *)&rq->ifr_ifru;
2080
2081 switch(cmd) {
2082 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2083 case SIOCGMIIPHY:
2084 if (!netif_running(dev)) return -EINVAL;
2085 data[0] = aup->phy_addr;
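		/* fall through to read a register from that PHY */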
2086 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
2087 case SIOCGMIIREG:
2088 data[3] = mdio_read(dev, data[0], data[1]);
2089 return 0;
2090 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
2091 case SIOCSMIIREG:
2092 if (!capable(CAP_NET_ADMIN))
2093 return -EPERM;
2094 mdio_write(dev, data[0], data[1], data[2]);
2095 return 0;
2096 default:
2097 return -EOPNOTSUPP;
2098 }
2099
2100}
2101
2102
2103static int au1000_set_config(struct net_device *dev, struct ifmap *map)
2104{
2105 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2106 u16 control;
2107
2108 if (au1000_debug > 4) {
2109 printk("%s: set_config called: dev->if_port %d map->port %x\n",
2110 dev->name, dev->if_port, map->port);
2111 }
2112
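	/* Force the PHY into the requested mode; the link is marked down
	 * and the periodic link timer brings it back up once the PHY
	 * reports link. */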
2113 switch(map->port){
2114 case IF_PORT_UNKNOWN: /* use auto here */
2115 printk(KERN_INFO "%s: config phy for aneg\n",
2116 dev->name);
2117 dev->if_port = map->port;
2118 /* Link Down: the timer will bring it up */
2119 netif_carrier_off(dev);
2120
2121 /* read current control */
2122 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2123 control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
2124
2125 /* enable auto negotiation and reset the negotiation */
2126 mdio_write(dev, aup->phy_addr, MII_CONTROL,
2127 control | MII_CNTL_AUTO |
2128 MII_CNTL_RST_AUTO);
2129
2130 break;
2131
2132 case IF_PORT_10BASET: /* 10BaseT */
2133 printk(KERN_INFO "%s: config phy for 10BaseT\n",
2134 dev->name);
2135 dev->if_port = map->port;
2136
2137 /* Link Down: the timer will bring it up */
2138 netif_carrier_off(dev);
2139
2140 /* set Speed to 10Mbps, Half Duplex */
2141 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2142 control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
2143 MII_CNTL_FDX);
2144
2145 /* disable auto negotiation and force 10M/HD mode*/
2146 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2147 break;
2148
2149 case IF_PORT_100BASET: /* 100BaseT */
2150 case IF_PORT_100BASETX: /* 100BaseTx */
2151 printk(KERN_INFO "%s: config phy for 100BaseTX\n",
2152 dev->name);
2153 dev->if_port = map->port;
2154
2155 /* Link Down: the timer will bring it up */
2156 netif_carrier_off(dev);
2157
2158 /* set Speed to 100Mbps, Half Duplex */
2159 /* disable auto negotiation and enable 100MBit Mode */
2160 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2161 control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
2162 control |= MII_CNTL_F100;
2163 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2164 break;
2165
2166 case IF_PORT_100BASEFX: /* 100BaseFx */
2167 printk(KERN_INFO "%s: config phy for 100BaseFX\n",
2168 dev->name);
2169 dev->if_port = map->port;
2170
2171 /* Link Down: the timer will bring it up */
2172 netif_carrier_off(dev);
2173
2174 /* set Speed to 100Mbps, Full Duplex */
2175 /* disable auto negotiation and enable 100MBit Mode */
2176 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2177 control &= ~MII_CNTL_AUTO;
2178 control |= MII_CNTL_F100 | MII_CNTL_FDX;
2179 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2180 break;
2181 case IF_PORT_10BASE2: /* 10Base2 */
2182 case IF_PORT_AUI: /* AUI */
2183 /* These modes are not supported */
2184 printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
2185 dev->name);
2186 return -EOPNOTSUPP;
2188
2189 default:
2190 printk(KERN_ERR "%s: Invalid media selected\n",
2191 dev->name);
2192 return -EINVAL;
2193 }
2194 return 0;
2195}
2196
2197static struct net_device_stats *au1000_get_stats(struct net_device *dev)
2198{
2199 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2200
2201 if (au1000_debug > 4)
2202 printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
2203
2204 if (netif_device_present(dev)) {
2205 return &aup->stats;
2206 }
2207 return NULL;
2208}
2209
2210module_init(au1000_init_module);
2211module_exit(au1000_cleanup_module);