1/*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001,2002,2003 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Author: MontaVista Software, Inc.
13 * ppopov@mvista.com or source@mvista.com
14 *
15 * ########################################################################
16 *
17 * This program is free software; you can distribute it and/or modify it
18 * under the terms of the GNU General Public License (Version 2) as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 * for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
29 *
30 * ########################################################################
31 *
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/string.h>
39#include <linux/timer.h>
40#include <linux/errno.h>
41#include <linux/in.h>
42#include <linux/ioport.h>
43#include <linux/bitops.h>
44#include <linux/slab.h>
45#include <linux/interrupt.h>
46#include <linux/pci.h>
47#include <linux/init.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/ethtool.h>
51#include <linux/mii.h>
52#include <linux/skbuff.h>
53#include <linux/delay.h>
54#include <asm/mipsregs.h>
55#include <asm/irq.h>
56#include <asm/io.h>
57#include <asm/processor.h>
58
59#include <asm/mach-au1x00/au1000.h>
60#include <asm/cpu.h>
61#include "au1000_eth.h"
62
63#ifdef AU1000_ETH_DEBUG
64static int au1000_debug = 5;
65#else
66static int au1000_debug = 3;
67#endif
68
69#define DRV_NAME "au1000eth"
70#define DRV_VERSION "1.5"
71#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
72#define DRV_DESC "Au1xxx on-chip Ethernet driver"
73
74MODULE_AUTHOR(DRV_AUTHOR);
75MODULE_DESCRIPTION(DRV_DESC);
76MODULE_LICENSE("GPL");
77
78// prototypes
79static void hard_stop(struct net_device *);
80static void enable_rx_tx(struct net_device *dev);
81static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
82static int au1000_init(struct net_device *);
83static int au1000_open(struct net_device *);
84static int au1000_close(struct net_device *);
85static int au1000_tx(struct sk_buff *, struct net_device *);
86static int au1000_rx(struct net_device *);
87static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
88static void au1000_tx_timeout(struct net_device *);
89static int au1000_set_config(struct net_device *dev, struct ifmap *map);
90static void set_rx_mode(struct net_device *);
91static struct net_device_stats *au1000_get_stats(struct net_device *);
92static inline void update_tx_stats(struct net_device *, u32, u32);
93static inline void update_rx_stats(struct net_device *, u32);
94static void au1000_timer(unsigned long);
95static int au1000_ioctl(struct net_device *, struct ifreq *, int);
96static int mdio_read(struct net_device *, int, int);
97static void mdio_write(struct net_device *, int, int, u16);
98static void dump_mii(struct net_device *dev, int phy_id);
99
100// externs
101extern void ack_rise_edge_irq(unsigned int);
102extern int get_ethernet_addr(char *ethernet_addr);
103extern void str2eaddr(unsigned char *ea, unsigned char *str);
104extern char * __init prom_getcmdline(void);
105
106/*
107 * Theory of operation
108 *
109 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
110 * There are four receive and four transmit descriptors. These
111 * descriptors are not in memory; rather, they are just a set of
112 * hardware registers.
113 *
114 * Since the Au1000 has a coherent data cache, the receive and
115 * transmit buffers are allocated from the KSEG0 segment. The
116 * hardware registers, however, are still mapped at KSEG1 to
 117 * make sure there are no out-of-order writes and that all writes
118 * complete immediately.
119 */
120
121/* These addresses are only used if yamon doesn't tell us what
122 * the mac address is, and the mac address is not passed on the
123 * command line.
124 */
125static unsigned char au1000_mac_addr[6] __devinitdata = {
126 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
127};
128
129#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
130#define RUN_AT(x) (jiffies + (x))
131
132// For reading/writing 32-bit words from/to DMA memory
133#define cpu_to_dma32 cpu_to_be32
134#define dma32_to_cpu be32_to_cpu
135
136struct au1000_private *au_macs[NUM_ETH_INTERFACES];
137
138/* FIXME
139 * All of the PHY code really should be detached from the MAC
140 * code.
141 */
142
143/* Default advertise */
144#define GENMII_DEFAULT_ADVERTISE \
145 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
146 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
147 ADVERTISED_Autoneg
148
149#define GENMII_DEFAULT_FEATURES \
150 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
151 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
152 SUPPORTED_Autoneg
153
154int bcm_5201_init(struct net_device *dev, int phy_addr)
155{
156 s16 data;
157
158 /* Stop auto-negotiation */
159 data = mdio_read(dev, phy_addr, MII_CONTROL);
160 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
161
162 /* Set advertisement to 10/100 and Half/Full duplex
163 * (full capabilities) */
164 data = mdio_read(dev, phy_addr, MII_ANADV);
165 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
166 mdio_write(dev, phy_addr, MII_ANADV, data);
167
168 /* Restart auto-negotiation */
169 data = mdio_read(dev, phy_addr, MII_CONTROL);
170 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
171 mdio_write(dev, phy_addr, MII_CONTROL, data);
172
173 if (au1000_debug > 4)
174 dump_mii(dev, phy_addr);
175 return 0;
176}
177
178int bcm_5201_reset(struct net_device *dev, int phy_addr)
179{
180 s16 mii_control, timeout;
181
182 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
183 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
184 mdelay(1);
185 for (timeout = 100; timeout > 0; --timeout) {
186 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
187 if ((mii_control & MII_CNTL_RESET) == 0)
188 break;
189 mdelay(1);
190 }
191 if (mii_control & MII_CNTL_RESET) {
 192 printk(KERN_ERR "%s PHY reset timeout!\n", dev->name);
193 return -1;
194 }
195 return 0;
196}
197
198int
199bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
200{
201 u16 mii_data;
202 struct au1000_private *aup;
203
204 if (!dev) {
205 printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
206 return -1;
207 }
208 aup = (struct au1000_private *) dev->priv;
209
210 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
211 if (mii_data & MII_STAT_LINK) {
212 *link = 1;
213 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
214 if (mii_data & MII_AUX_100) {
215 if (mii_data & MII_AUX_FDX) {
216 *speed = IF_PORT_100BASEFX;
217 dev->if_port = IF_PORT_100BASEFX;
218 }
219 else {
220 *speed = IF_PORT_100BASETX;
221 dev->if_port = IF_PORT_100BASETX;
222 }
223 }
224 else {
225 *speed = IF_PORT_10BASET;
226 dev->if_port = IF_PORT_10BASET;
227 }
228
229 }
230 else {
231 *link = 0;
232 *speed = 0;
233 dev->if_port = IF_PORT_UNKNOWN;
234 }
235 return 0;
236}
237
238int lsi_80227_init(struct net_device *dev, int phy_addr)
239{
240 if (au1000_debug > 4)
241 printk("lsi_80227_init\n");
242
243 /* restart auto-negotiation */
244 mdio_write(dev, phy_addr, MII_CONTROL,
245 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
246 mdelay(1);
247
248 /* set up LEDs to correct display */
249#ifdef CONFIG_MIPS_MTX1
250 mdio_write(dev, phy_addr, 17, 0xff80);
251#else
252 mdio_write(dev, phy_addr, 17, 0xffc0);
253#endif
254
255 if (au1000_debug > 4)
256 dump_mii(dev, phy_addr);
257 return 0;
258}
259
260int lsi_80227_reset(struct net_device *dev, int phy_addr)
261{
262 s16 mii_control, timeout;
263
264 if (au1000_debug > 4) {
265 printk("lsi_80227_reset\n");
266 dump_mii(dev, phy_addr);
267 }
268
269 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
270 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
271 mdelay(1);
272 for (timeout = 100; timeout > 0; --timeout) {
273 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
274 if ((mii_control & MII_CNTL_RESET) == 0)
275 break;
276 mdelay(1);
277 }
278 if (mii_control & MII_CNTL_RESET) {
 279 printk(KERN_ERR "%s PHY reset timeout!\n", dev->name);
280 return -1;
281 }
282 return 0;
283}
284
285int
286lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
287{
288 u16 mii_data;
289 struct au1000_private *aup;
290
291 if (!dev) {
292 printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
293 return -1;
294 }
295 aup = (struct au1000_private *) dev->priv;
296
297 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
298 if (mii_data & MII_STAT_LINK) {
299 *link = 1;
300 mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
301 if (mii_data & MII_LSI_PHY_STAT_SPD) {
302 if (mii_data & MII_LSI_PHY_STAT_FDX) {
303 *speed = IF_PORT_100BASEFX;
304 dev->if_port = IF_PORT_100BASEFX;
305 }
306 else {
307 *speed = IF_PORT_100BASETX;
308 dev->if_port = IF_PORT_100BASETX;
309 }
310 }
311 else {
312 *speed = IF_PORT_10BASET;
313 dev->if_port = IF_PORT_10BASET;
314 }
315
316 }
317 else {
318 *link = 0;
319 *speed = 0;
320 dev->if_port = IF_PORT_UNKNOWN;
321 }
322 return 0;
323}
324
325int am79c901_init(struct net_device *dev, int phy_addr)
326{
327 printk("am79c901_init\n");
328 return 0;
329}
330
331int am79c901_reset(struct net_device *dev, int phy_addr)
332{
333 printk("am79c901_reset\n");
334 return 0;
335}
336
337int
338am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
339{
340 return 0;
341}
342
343int am79c874_init(struct net_device *dev, int phy_addr)
344{
345 s16 data;
346
 347 /* The 79c874 has bit assignments quite similar to the BCM5201 */
348 if (au1000_debug > 4)
 349 printk("am79c874_init\n");
350
351 /* Stop auto-negotiation */
352 data = mdio_read(dev, phy_addr, MII_CONTROL);
353 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
354
355 /* Set advertisement to 10/100 and Half/Full duplex
356 * (full capabilities) */
357 data = mdio_read(dev, phy_addr, MII_ANADV);
358 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
359 mdio_write(dev, phy_addr, MII_ANADV, data);
360
361 /* Restart auto-negotiation */
362 data = mdio_read(dev, phy_addr, MII_CONTROL);
363 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
364
365 mdio_write(dev, phy_addr, MII_CONTROL, data);
366
367 if (au1000_debug > 4) dump_mii(dev, phy_addr);
368 return 0;
369}
370
371int am79c874_reset(struct net_device *dev, int phy_addr)
372{
373 s16 mii_control, timeout;
374
375 if (au1000_debug > 4)
376 printk("am79c874_reset\n");
377
378 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
379 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
380 mdelay(1);
381 for (timeout = 100; timeout > 0; --timeout) {
382 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
383 if ((mii_control & MII_CNTL_RESET) == 0)
384 break;
385 mdelay(1);
386 }
387 if (mii_control & MII_CNTL_RESET) {
 388 printk(KERN_ERR "%s PHY reset timeout!\n", dev->name);
389 return -1;
390 }
391 return 0;
392}
393
394int
395am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
396{
397 u16 mii_data;
398 struct au1000_private *aup;
399
400 // printk("am79c874_status\n");
401 if (!dev) {
402 printk(KERN_ERR "am79c874_status error: NULL dev\n");
403 return -1;
404 }
405
406 aup = (struct au1000_private *) dev->priv;
407 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
408
409 if (mii_data & MII_STAT_LINK) {
410 *link = 1;
411 mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
412 if (mii_data & MII_AMD_PHY_STAT_SPD) {
413 if (mii_data & MII_AMD_PHY_STAT_FDX) {
414 *speed = IF_PORT_100BASEFX;
415 dev->if_port = IF_PORT_100BASEFX;
416 }
417 else {
418 *speed = IF_PORT_100BASETX;
419 dev->if_port = IF_PORT_100BASETX;
420 }
421 }
422 else {
423 *speed = IF_PORT_10BASET;
424 dev->if_port = IF_PORT_10BASET;
425 }
426
427 }
428 else {
429 *link = 0;
430 *speed = 0;
431 dev->if_port = IF_PORT_UNKNOWN;
432 }
433 return 0;
434}
435
436int lxt971a_init(struct net_device *dev, int phy_addr)
437{
438 if (au1000_debug > 4)
439 printk("lxt971a_init\n");
440
441 /* restart auto-negotiation */
442 mdio_write(dev, phy_addr, MII_CONTROL,
443 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
444
445 /* set up LEDs to correct display */
446 mdio_write(dev, phy_addr, 20, 0x0422);
447
448 if (au1000_debug > 4)
449 dump_mii(dev, phy_addr);
450 return 0;
451}
452
453int lxt971a_reset(struct net_device *dev, int phy_addr)
454{
455 s16 mii_control, timeout;
456
457 if (au1000_debug > 4) {
458 printk("lxt971a_reset\n");
459 dump_mii(dev, phy_addr);
460 }
461
462 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
463 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
464 mdelay(1);
465 for (timeout = 100; timeout > 0; --timeout) {
466 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
467 if ((mii_control & MII_CNTL_RESET) == 0)
468 break;
469 mdelay(1);
470 }
471 if (mii_control & MII_CNTL_RESET) {
 472 printk(KERN_ERR "%s PHY reset timeout!\n", dev->name);
473 return -1;
474 }
475 return 0;
476}
477
478int
479lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
480{
481 u16 mii_data;
482 struct au1000_private *aup;
483
484 if (!dev) {
485 printk(KERN_ERR "lxt971a_status error: NULL dev\n");
486 return -1;
487 }
488 aup = (struct au1000_private *) dev->priv;
489
490 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
491 if (mii_data & MII_STAT_LINK) {
492 *link = 1;
493 mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
494 if (mii_data & MII_INTEL_PHY_STAT_SPD) {
495 if (mii_data & MII_INTEL_PHY_STAT_FDX) {
496 *speed = IF_PORT_100BASEFX;
497 dev->if_port = IF_PORT_100BASEFX;
498 }
499 else {
500 *speed = IF_PORT_100BASETX;
501 dev->if_port = IF_PORT_100BASETX;
502 }
503 }
504 else {
505 *speed = IF_PORT_10BASET;
506 dev->if_port = IF_PORT_10BASET;
507 }
508
509 }
510 else {
511 *link = 0;
512 *speed = 0;
513 dev->if_port = IF_PORT_UNKNOWN;
514 }
515 return 0;
516}
517
518int ks8995m_init(struct net_device *dev, int phy_addr)
519{
520 s16 data;
521
522// printk("ks8995m_init\n");
523 /* Stop auto-negotiation */
524 data = mdio_read(dev, phy_addr, MII_CONTROL);
525 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
526
527 /* Set advertisement to 10/100 and Half/Full duplex
528 * (full capabilities) */
529 data = mdio_read(dev, phy_addr, MII_ANADV);
530 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
531 mdio_write(dev, phy_addr, MII_ANADV, data);
532
533 /* Restart auto-negotiation */
534 data = mdio_read(dev, phy_addr, MII_CONTROL);
535 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
536 mdio_write(dev, phy_addr, MII_CONTROL, data);
537
538 if (au1000_debug > 4) dump_mii(dev, phy_addr);
539
540 return 0;
541}
542
543int ks8995m_reset(struct net_device *dev, int phy_addr)
544{
545 s16 mii_control, timeout;
546
547// printk("ks8995m_reset\n");
548 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
549 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
550 mdelay(1);
551 for (timeout = 100; timeout > 0; --timeout) {
552 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
553 if ((mii_control & MII_CNTL_RESET) == 0)
554 break;
555 mdelay(1);
556 }
557 if (mii_control & MII_CNTL_RESET) {
 558 printk(KERN_ERR "%s PHY reset timeout!\n", dev->name);
559 return -1;
560 }
561 return 0;
562}
563
564int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
565{
566 u16 mii_data;
567 struct au1000_private *aup;
568
569 if (!dev) {
570 printk(KERN_ERR "ks8995m_status error: NULL dev\n");
571 return -1;
572 }
573 aup = (struct au1000_private *) dev->priv;
574
575 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
576 if (mii_data & MII_STAT_LINK) {
577 *link = 1;
578 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
579 if (mii_data & MII_AUX_100) {
580 if (mii_data & MII_AUX_FDX) {
581 *speed = IF_PORT_100BASEFX;
582 dev->if_port = IF_PORT_100BASEFX;
583 }
584 else {
585 *speed = IF_PORT_100BASETX;
586 dev->if_port = IF_PORT_100BASETX;
587 }
588 }
589 else {
590 *speed = IF_PORT_10BASET;
591 dev->if_port = IF_PORT_10BASET;
592 }
593
594 }
595 else {
596 *link = 0;
597 *speed = 0;
598 dev->if_port = IF_PORT_UNKNOWN;
599 }
600 return 0;
601}
602
603int
604smsc_83C185_init (struct net_device *dev, int phy_addr)
605{
606 s16 data;
607
608 if (au1000_debug > 4)
609 printk("smsc_83C185_init\n");
610
611 /* Stop auto-negotiation */
612 data = mdio_read(dev, phy_addr, MII_CONTROL);
613 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
614
615 /* Set advertisement to 10/100 and Half/Full duplex
616 * (full capabilities) */
617 data = mdio_read(dev, phy_addr, MII_ANADV);
618 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
619 mdio_write(dev, phy_addr, MII_ANADV, data);
620
621 /* Restart auto-negotiation */
622 data = mdio_read(dev, phy_addr, MII_CONTROL);
623 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
624
625 mdio_write(dev, phy_addr, MII_CONTROL, data);
626
627 if (au1000_debug > 4) dump_mii(dev, phy_addr);
628 return 0;
629}
630
631int
632smsc_83C185_reset (struct net_device *dev, int phy_addr)
633{
634 s16 mii_control, timeout;
635
636 if (au1000_debug > 4)
637 printk("smsc_83C185_reset\n");
638
639 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
640 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
641 mdelay(1);
642 for (timeout = 100; timeout > 0; --timeout) {
643 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
644 if ((mii_control & MII_CNTL_RESET) == 0)
645 break;
646 mdelay(1);
647 }
648 if (mii_control & MII_CNTL_RESET) {
 649 printk(KERN_ERR "%s PHY reset timeout!\n", dev->name);
650 return -1;
651 }
652 return 0;
653}
654
655int
656smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
657{
658 u16 mii_data;
659 struct au1000_private *aup;
660
661 if (!dev) {
662 printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
663 return -1;
664 }
665
666 aup = (struct au1000_private *) dev->priv;
667 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
668
669 if (mii_data & MII_STAT_LINK) {
670 *link = 1;
671 mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
672 if (mii_data & (1<<3)) {
673 if (mii_data & (1<<4)) {
674 *speed = IF_PORT_100BASEFX;
675 dev->if_port = IF_PORT_100BASEFX;
676 }
677 else {
678 *speed = IF_PORT_100BASETX;
679 dev->if_port = IF_PORT_100BASETX;
680 }
681 }
682 else {
683 *speed = IF_PORT_10BASET;
684 dev->if_port = IF_PORT_10BASET;
685 }
686 }
687 else {
688 *link = 0;
689 *speed = 0;
690 dev->if_port = IF_PORT_UNKNOWN;
691 }
692 return 0;
693}
694
695
696#ifdef CONFIG_MIPS_BOSPORUS
697int stub_init(struct net_device *dev, int phy_addr)
698{
699 //printk("PHY stub_init\n");
700 return 0;
701}
702
703int stub_reset(struct net_device *dev, int phy_addr)
704{
705 //printk("PHY stub_reset\n");
706 return 0;
707}
708
709int
710stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
711{
712 //printk("PHY stub_status\n");
713 *link = 1;
714 /* hmmm, revisit */
715 *speed = IF_PORT_100BASEFX;
716 dev->if_port = IF_PORT_100BASEFX;
717 return 0;
718}
719#endif
720
721struct phy_ops bcm_5201_ops = {
722 bcm_5201_init,
723 bcm_5201_reset,
724 bcm_5201_status,
725};
726
727struct phy_ops am79c874_ops = {
728 am79c874_init,
729 am79c874_reset,
730 am79c874_status,
731};
732
733struct phy_ops am79c901_ops = {
734 am79c901_init,
735 am79c901_reset,
736 am79c901_status,
737};
738
739struct phy_ops lsi_80227_ops = {
740 lsi_80227_init,
741 lsi_80227_reset,
742 lsi_80227_status,
743};
744
745struct phy_ops lxt971a_ops = {
746 lxt971a_init,
747 lxt971a_reset,
748 lxt971a_status,
749};
750
751struct phy_ops ks8995m_ops = {
752 ks8995m_init,
753 ks8995m_reset,
754 ks8995m_status,
755};
756
757struct phy_ops smsc_83C185_ops = {
758 smsc_83C185_init,
759 smsc_83C185_reset,
760 smsc_83C185_status,
761};
762
763#ifdef CONFIG_MIPS_BOSPORUS
764struct phy_ops stub_ops = {
765 stub_init,
766 stub_reset,
767 stub_status,
768};
769#endif
770
771static struct mii_chip_info {
772 const char * name;
773 u16 phy_id0;
774 u16 phy_id1;
775 struct phy_ops *phy_ops;
776 int dual_phy;
777} mii_chip_table[] = {
778 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
779 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
780 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
 781 {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops, 0},
782 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
783 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
784 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
785 {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
786 {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
787 {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
788#ifdef CONFIG_MIPS_BOSPORUS
789 {"Stub", 0x1234, 0x5678, &stub_ops },
790#endif
791 {0,},
792};
793
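/*
 * MDIO access goes through the MAC's MII management registers: wait
 * for any in-flight transaction to clear the busy bit, program the
 * PHY and register numbers into mii_control, then poll busy again
 * before picking the result up from mii_data.
 */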
794static int mdio_read(struct net_device *dev, int phy_id, int reg)
795{
796 struct au1000_private *aup = (struct au1000_private *) dev->priv;
797 volatile u32 *mii_control_reg;
798 volatile u32 *mii_data_reg;
799 u32 timedout = 20;
800 u32 mii_control;
801
802 #ifdef CONFIG_BCM5222_DUAL_PHY
803 /* First time we probe, it's for the mac0 phy.
804 * Since we haven't determined yet that we have a dual phy,
805 * aup->mii->mii_control_reg won't be setup and we'll
806 * default to the else statement.
807 * By the time we probe for the mac1 phy, the mii_control_reg
808 * will be setup to be the address of the mac0 phy control since
809 * both phys are controlled through mac0.
810 */
811 if (aup->mii && aup->mii->mii_control_reg) {
812 mii_control_reg = aup->mii->mii_control_reg;
813 mii_data_reg = aup->mii->mii_data_reg;
814 }
815 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
816 /* assume both phys are controlled through mac0 */
817 mii_control_reg = au_macs[0]->mii->mii_control_reg;
818 mii_data_reg = au_macs[0]->mii->mii_data_reg;
819 }
820 else
821 #endif
822 {
823 /* default control and data reg addresses */
824 mii_control_reg = &aup->mac->mii_control;
825 mii_data_reg = &aup->mac->mii_data;
826 }
827
828 while (*mii_control_reg & MAC_MII_BUSY) {
829 mdelay(1);
830 if (--timedout == 0) {
831 printk(KERN_ERR "%s: read_MII busy timeout!!\n",
832 dev->name);
833 return -1;
834 }
835 }
836
837 mii_control = MAC_SET_MII_SELECT_REG(reg) |
838 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
839
840 *mii_control_reg = mii_control;
841
842 timedout = 20;
843 while (*mii_control_reg & MAC_MII_BUSY) {
844 mdelay(1);
845 if (--timedout == 0) {
846 printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
847 dev->name);
848 return -1;
849 }
850 }
851 return (int)*mii_data_reg;
852}
853
854static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
855{
856 struct au1000_private *aup = (struct au1000_private *) dev->priv;
857 volatile u32 *mii_control_reg;
858 volatile u32 *mii_data_reg;
859 u32 timedout = 20;
860 u32 mii_control;
861
862 #ifdef CONFIG_BCM5222_DUAL_PHY
863 if (aup->mii && aup->mii->mii_control_reg) {
864 mii_control_reg = aup->mii->mii_control_reg;
865 mii_data_reg = aup->mii->mii_data_reg;
866 }
867 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
868 /* assume both phys are controlled through mac0 */
869 mii_control_reg = au_macs[0]->mii->mii_control_reg;
870 mii_data_reg = au_macs[0]->mii->mii_data_reg;
871 }
872 else
873 #endif
874 {
875 /* default control and data reg addresses */
876 mii_control_reg = &aup->mac->mii_control;
877 mii_data_reg = &aup->mac->mii_data;
878 }
879
880 while (*mii_control_reg & MAC_MII_BUSY) {
881 mdelay(1);
882 if (--timedout == 0) {
883 printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
884 dev->name);
885 return;
886 }
887 }
888
889 mii_control = MAC_SET_MII_SELECT_REG(reg) |
890 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
891
892 *mii_data_reg = value;
893 *mii_control_reg = mii_control;
894}
895
896
897static void dump_mii(struct net_device *dev, int phy_id)
898{
899 int i, val;
900
901 for (i = 0; i < 7; i++) {
902 if ((val = mdio_read(dev, phy_id, i)) >= 0)
903 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
904 }
905 for (i = 16; i < 25; i++) {
906 if ((val = mdio_read(dev, phy_id, i)) >= 0)
907 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
908 }
909}
910
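/*
 * Scan all 32 MDIO addresses, read the PHY ID registers of every
 * address that responds, and match them against mii_chip_table;
 * the first match supplies the phy_ops used for link management.
 */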
911static int mii_probe (struct net_device * dev)
912{
913 struct au1000_private *aup = (struct au1000_private *) dev->priv;
914 int phy_addr;
915#ifdef CONFIG_MIPS_BOSPORUS
916 int phy_found=0;
917#endif
918
 919 /* search all 32 possible MII PHY addresses */
920 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
921 u16 mii_status;
922 u16 phy_id0, phy_id1;
923 int i;
924
925 #ifdef CONFIG_BCM5222_DUAL_PHY
926 /* Mask the already found phy, try next one */
927 if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
928 if (au_macs[0]->phy_addr == phy_addr)
929 continue;
930 }
931 #endif
932
933 mii_status = mdio_read(dev, phy_addr, MII_STATUS);
934 if (mii_status == 0xffff || mii_status == 0x0000)
 935 /* the mii is not accessible, try the next one */
936 continue;
937
938 phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
939 phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
940
941 /* search our mii table for the current mii */
942 for (i = 0; mii_chip_table[i].phy_id1; i++) {
943 if (phy_id0 == mii_chip_table[i].phy_id0 &&
944 phy_id1 == mii_chip_table[i].phy_id1) {
945 struct mii_phy * mii_phy = aup->mii;
946
947 printk(KERN_INFO "%s: %s at phy address %d\n",
948 dev->name, mii_chip_table[i].name,
949 phy_addr);
950#ifdef CONFIG_MIPS_BOSPORUS
951 phy_found = 1;
952#endif
953 mii_phy->chip_info = mii_chip_table+i;
954 aup->phy_addr = phy_addr;
955 aup->want_autoneg = 1;
956 aup->phy_ops = mii_chip_table[i].phy_ops;
957 aup->phy_ops->phy_init(dev,phy_addr);
958
959 // Check for dual-phy and then store required
960 // values and set indicators. We need to do
961 // this now since mdio_{read,write} need the
962 // control and data register addresses.
963 #ifdef CONFIG_BCM5222_DUAL_PHY
964 if ( mii_chip_table[i].dual_phy) {
965
966 /* assume both phys are controlled
967 * through MAC0. Board specific? */
968
969 /* sanity check */
970 if (!au_macs[0] || !au_macs[0]->mii)
971 return -1;
972 aup->mii->mii_control_reg = (u32 *)
973 &au_macs[0]->mac->mii_control;
974 aup->mii->mii_data_reg = (u32 *)
975 &au_macs[0]->mac->mii_data;
976 }
977 #endif
978 goto found;
979 }
980 }
981 }
982found:
983
984#ifdef CONFIG_MIPS_BOSPORUS
 985 /* This is a workaround for the Micrel/Kendin 5-port switch.
 986 The second MAC doesn't see a PHY connected, so we need to
 987 trick it into thinking we have one.
 988
 989 If this kernel is run on another Au1500 development board,
 990 the stub will be found as well as the actual PHY. However,
 991 the last PHY found will be used, usually at Addr 31 (Db1500).
992 */
 993 if (!phy_found)
994 {
995 u16 phy_id0, phy_id1;
996 int i;
997
998 phy_id0 = 0x1234;
999 phy_id1 = 0x5678;
1000
1001 /* search our mii table for the current mii */
1002 for (i = 0; mii_chip_table[i].phy_id1; i++) {
1003 if (phy_id0 == mii_chip_table[i].phy_id0 &&
1004 phy_id1 == mii_chip_table[i].phy_id1) {
1005 struct mii_phy * mii_phy;
1006
1007 printk(KERN_INFO "%s: %s at phy address %d\n",
1008 dev->name, mii_chip_table[i].name,
1009 phy_addr);
1010 mii_phy = kmalloc(sizeof(struct mii_phy),
1011 GFP_KERNEL);
1012 if (mii_phy) {
1013 mii_phy->chip_info = mii_chip_table+i;
1014 aup->phy_addr = phy_addr;
1015 mii_phy->next = aup->mii;
1016 aup->phy_ops =
1017 mii_chip_table[i].phy_ops;
1018 aup->mii = mii_phy;
1019 aup->phy_ops->phy_init(dev,phy_addr);
1020 } else {
1021 printk(KERN_ERR "%s: out of memory\n",
1022 dev->name);
1023 return -1;
1024 }
1025 mii_phy->chip_info = mii_chip_table+i;
1026 aup->phy_addr = phy_addr;
1027 aup->phy_ops = mii_chip_table[i].phy_ops;
1028 aup->phy_ops->phy_init(dev,phy_addr);
1029 break;
1030 }
1031 }
1032 }
1033 if (aup->mac_id == 0) {
1034 /* the Bosporus phy responds to addresses 0-5 but
1035 * 5 is the correct one.
1036 */
1037 aup->phy_addr = 5;
1038 }
1039#endif
1040
1041 if (aup->mii->chip_info == NULL) {
 1042 printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
1043 dev->name);
1044 return -1;
1045 }
1046
1047 printk(KERN_INFO "%s: Using %s as default\n",
1048 dev->name, aup->mii->chip_info->name);
1049
1050 return 0;
1051}
1052
1053
1054/*
1055 * Buffer allocation/deallocation routines. The buffer descriptor returned
 1056 * has the virtual and DMA address of a buffer suitable for
 1057 * both receive and transmit operations.
1058 */
1059static db_dest_t *GetFreeDB(struct au1000_private *aup)
1060{
1061 db_dest_t *pDB;
1062 pDB = aup->pDBfree;
1063
1064 if (pDB) {
1065 aup->pDBfree = pDB->pnext;
1066 }
1067 return pDB;
1068}
1069
1070void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
1071{
1072 db_dest_t *pDBfree = aup->pDBfree;
1073 if (pDBfree)
1074 pDBfree->pnext = pDB;
1075 aup->pDBfree = pDB;
1076}
1077
1078static void enable_rx_tx(struct net_device *dev)
1079{
1080 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1081
1082 if (au1000_debug > 4)
1083 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
1084
1085 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
1086 au_sync_delay(10);
1087}
1088
1089static void hard_stop(struct net_device *dev)
1090{
1091 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1092
1093 if (au1000_debug > 4)
1094 printk(KERN_INFO "%s: hard stop\n", dev->name);
1095
1096 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
1097 au_sync_delay(10);
1098}
1099
1100
1101static void reset_mac(struct net_device *dev)
1102{
1103 int i;
1104 u32 flags;
1105 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1106
1107 if (au1000_debug > 4)
1108 printk(KERN_INFO "%s: reset mac, aup %x\n",
1109 dev->name, (unsigned)aup);
1110
1111 spin_lock_irqsave(&aup->lock, flags);
 1112 if (aup->timer.function == &au1000_timer) { /* check if timer is initialized */
1113 del_timer(&aup->timer);
1114 }
1115
1116 hard_stop(dev);
1117 #ifdef CONFIG_BCM5222_DUAL_PHY
1118 if (aup->mac_id != 0) {
1119 #endif
1120 /* If BCM5222, we can't leave MAC0 in reset because then
1121 * we can't access the dual phy for ETH1 */
1122 *aup->enable = MAC_EN_CLOCK_ENABLE;
1123 au_sync_delay(2);
1124 *aup->enable = 0;
1125 au_sync_delay(2);
1126 #ifdef CONFIG_BCM5222_DUAL_PHY
1127 }
1128 #endif
1129 aup->tx_full = 0;
1130 for (i = 0; i < NUM_RX_DMA; i++) {
1131 /* reset control bits */
1132 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
1133 }
1134 for (i = 0; i < NUM_TX_DMA; i++) {
1135 /* reset control bits */
1136 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
1137 }
1138 spin_unlock_irqrestore(&aup->lock, flags);
1139}
1140
1141
1142/*
1143 * Setup the receive and transmit "rings". These pointers are the addresses
1144 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
1145 * these are not descriptors sitting in memory.
1146 */
1147static void
1148setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
1149{
1150 int i;
1151
1152 for (i = 0; i < NUM_RX_DMA; i++) {
1153 aup->rx_dma_ring[i] =
1154 (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
1155 }
1156 for (i = 0; i < NUM_TX_DMA; i++) {
1157 aup->tx_dma_ring[i] =
1158 (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
1159 }
1160}
1161
1162static struct {
1163 int port;
1164 u32 base_addr;
1165 u32 macen_addr;
1166 int irq;
1167 struct net_device *dev;
1168} iflist[2];
1169
1170static int num_ifs;
1171
1172/*
 1173 * Set up the base address and interrupt of the Au1xxx ethernet MACs
1174 * based on cpu type and whether the interface is enabled in sys_pinfunc
1175 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1176 */
1177static int __init au1000_init_module(void)
1178{
1179 struct cpuinfo_mips *c = &current_cpu_data;
1180 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1181 struct net_device *dev;
1182 int i, found_one = 0;
1183
1184 switch (c->cputype) {
1185#ifdef CONFIG_SOC_AU1000
1186 case CPU_AU1000:
1187 num_ifs = 2 - ni;
1188 iflist[0].base_addr = AU1000_ETH0_BASE;
1189 iflist[1].base_addr = AU1000_ETH1_BASE;
1190 iflist[0].macen_addr = AU1000_MAC0_ENABLE;
1191 iflist[1].macen_addr = AU1000_MAC1_ENABLE;
1192 iflist[0].irq = AU1000_MAC0_DMA_INT;
1193 iflist[1].irq = AU1000_MAC1_DMA_INT;
1194 break;
1195#endif
1196#ifdef CONFIG_SOC_AU1100
1197 case CPU_AU1100:
1198 num_ifs = 1 - ni;
1199 iflist[0].base_addr = AU1100_ETH0_BASE;
1200 iflist[0].macen_addr = AU1100_MAC0_ENABLE;
1201 iflist[0].irq = AU1100_MAC0_DMA_INT;
1202 break;
1203#endif
1204#ifdef CONFIG_SOC_AU1500
1205 case CPU_AU1500:
1206 num_ifs = 2 - ni;
1207 iflist[0].base_addr = AU1500_ETH0_BASE;
1208 iflist[1].base_addr = AU1500_ETH1_BASE;
1209 iflist[0].macen_addr = AU1500_MAC0_ENABLE;
1210 iflist[1].macen_addr = AU1500_MAC1_ENABLE;
1211 iflist[0].irq = AU1500_MAC0_DMA_INT;
1212 iflist[1].irq = AU1500_MAC1_DMA_INT;
1213 break;
1214#endif
1215#ifdef CONFIG_SOC_AU1550
1216 case CPU_AU1550:
1217 num_ifs = 2 - ni;
1218 iflist[0].base_addr = AU1550_ETH0_BASE;
1219 iflist[1].base_addr = AU1550_ETH1_BASE;
1220 iflist[0].macen_addr = AU1550_MAC0_ENABLE;
1221 iflist[1].macen_addr = AU1550_MAC1_ENABLE;
1222 iflist[0].irq = AU1550_MAC0_DMA_INT;
1223 iflist[1].irq = AU1550_MAC1_DMA_INT;
1224 break;
1225#endif
1226 default:
1227 num_ifs = 0;
1228 }
1229 for(i = 0; i < num_ifs; i++) {
1230 dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
1231 iflist[i].dev = dev;
1232 if (dev)
1233 found_one++;
1234 }
1235 if (!found_one)
1236 return -ENODEV;
1237 return 0;
1238}
1239
1240static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
1241{
1242 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1243 u16 ctl, adv;
1244
1245 /* Setup standard advertise */
1246 adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
1247 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1248 if (advertise & ADVERTISED_10baseT_Half)
1249 adv |= ADVERTISE_10HALF;
1250 if (advertise & ADVERTISED_10baseT_Full)
1251 adv |= ADVERTISE_10FULL;
1252 if (advertise & ADVERTISED_100baseT_Half)
1253 adv |= ADVERTISE_100HALF;
1254 if (advertise & ADVERTISED_100baseT_Full)
1255 adv |= ADVERTISE_100FULL;
1256 mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
1257
1258 /* Start/Restart aneg */
1259 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1260 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1261 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1262
1263 return 0;
1264}
1265
1266static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
1267{
1268 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1269 u16 ctl;
1270
1271 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1272 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
1273
1274 /* First reset the PHY */
1275 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
1276
1277 /* Select speed & duplex */
1278 switch (speed) {
1279 case SPEED_10:
1280 break;
1281 case SPEED_100:
1282 ctl |= BMCR_SPEED100;
1283 break;
1284 case SPEED_1000:
1285 default:
1286 return -EINVAL;
1287 }
1288 if (fd == DUPLEX_FULL)
1289 ctl |= BMCR_FULLDPLX;
1290 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1291
1292 return 0;
1293}
1294
1295
1296static void
1297au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
1298{
1299 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1300 u32 advertise;
1301 int autoneg;
1302 int forced_speed;
1303 int forced_duplex;
1304
1305 /* Default advertise */
1306 advertise = GENMII_DEFAULT_ADVERTISE;
1307 autoneg = aup->want_autoneg;
1308 forced_speed = SPEED_100;
1309 forced_duplex = DUPLEX_FULL;
1310
1311 /* Setup link parameters */
1312 if (cmd) {
1313 if (cmd->autoneg == AUTONEG_ENABLE) {
1314 advertise = cmd->advertising;
1315 autoneg = 1;
1316 } else {
1317 autoneg = 0;
1318
1319 forced_speed = cmd->speed;
1320 forced_duplex = cmd->duplex;
1321 }
1322 }
1323
1324 /* Configure PHY & start aneg */
1325 aup->want_autoneg = autoneg;
1326 if (autoneg)
1327 au1000_setup_aneg(dev, advertise);
1328 else
1329 au1000_setup_forced(dev, forced_speed, forced_duplex);
1330 mod_timer(&aup->timer, jiffies + HZ);
1331}
1332
1333static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1334{
1335 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1336 u16 link, speed;
1337
1338 cmd->supported = GENMII_DEFAULT_FEATURES;
1339 cmd->advertising = GENMII_DEFAULT_ADVERTISE;
1340 cmd->port = PORT_MII;
1341 cmd->transceiver = XCVR_EXTERNAL;
1342 cmd->phy_address = aup->phy_addr;
1343 spin_lock_irq(&aup->lock);
1344 cmd->autoneg = aup->want_autoneg;
1345 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1346 if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
1347 cmd->speed = SPEED_100;
1348 else if (speed == IF_PORT_10BASET)
1349 cmd->speed = SPEED_10;
1350 if (link && (dev->if_port == IF_PORT_100BASEFX))
1351 cmd->duplex = DUPLEX_FULL;
1352 else
1353 cmd->duplex = DUPLEX_HALF;
1354 spin_unlock_irq(&aup->lock);
1355 return 0;
1356}
1357
1358static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1359{
1360 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1361 unsigned long features = GENMII_DEFAULT_FEATURES;
1362
1363 if (!capable(CAP_NET_ADMIN))
1364 return -EPERM;
1365
1366 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1367 return -EINVAL;
1368 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1369 return -EINVAL;
1370 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1371 return -EINVAL;
1372 if (cmd->autoneg == AUTONEG_DISABLE)
1373 switch (cmd->speed) {
1374 case SPEED_10:
1375 if (cmd->duplex == DUPLEX_HALF &&
1376 (features & SUPPORTED_10baseT_Half) == 0)
1377 return -EINVAL;
1378 if (cmd->duplex == DUPLEX_FULL &&
1379 (features & SUPPORTED_10baseT_Full) == 0)
1380 return -EINVAL;
1381 break;
1382 case SPEED_100:
1383 if (cmd->duplex == DUPLEX_HALF &&
1384 (features & SUPPORTED_100baseT_Half) == 0)
1385 return -EINVAL;
1386 if (cmd->duplex == DUPLEX_FULL &&
1387 (features & SUPPORTED_100baseT_Full) == 0)
1388 return -EINVAL;
1389 break;
1390 default:
1391 return -EINVAL;
1392 }
1393 else if ((features & SUPPORTED_Autoneg) == 0)
1394 return -EINVAL;
1395
1396 spin_lock_irq(&aup->lock);
1397 au1000_start_link(dev, cmd);
1398 spin_unlock_irq(&aup->lock);
1399 return 0;
1400}
1401
1402static int au1000_nway_reset(struct net_device *dev)
1403{
1404 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1405
1406 if (!aup->want_autoneg)
1407 return -EINVAL;
1408 spin_lock_irq(&aup->lock);
1409 au1000_start_link(dev, NULL);
1410 spin_unlock_irq(&aup->lock);
1411 return 0;
1412}
1413
1414static void
1415au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1416{
1417 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1418
1419 strcpy(info->driver, DRV_NAME);
1420 strcpy(info->version, DRV_VERSION);
1421 info->fw_version[0] = '\0';
1422 sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
1423 info->regdump_len = 0;
1424}
1425
1426static u32 au1000_get_link(struct net_device *dev)
1427{
1428 return netif_carrier_ok(dev);
1429}
1430
1431static struct ethtool_ops au1000_ethtool_ops = {
1432 .get_settings = au1000_get_settings,
1433 .set_settings = au1000_set_settings,
1434 .get_drvinfo = au1000_get_drvinfo,
1435 .nway_reset = au1000_nway_reset,
1436 .get_link = au1000_get_link
1437};
1438
1439static struct net_device *
1440au1000_probe(u32 ioaddr, int irq, int port_num)
1441{
1442 static unsigned version_printed = 0;
1443 struct au1000_private *aup = NULL;
1444 struct net_device *dev = NULL;
1445 db_dest_t *pDB, *pDBfree;
1446 char *pmac, *argptr;
1447 char ethaddr[6];
1448 int i, err;
1449
1450 if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
1451 return NULL;
1452
1453 if (version_printed++ == 0)
1454 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1455
1456 dev = alloc_etherdev(sizeof(struct au1000_private));
1457 if (!dev) {
1458 printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
1459 return NULL;
1460 }
1461
1462 if ((err = register_netdev(dev))) {
1463 printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
1464 err);
1465 free_netdev(dev);
1466 return NULL;
1467 }
1468
1469 printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
1470 dev->name, ioaddr, irq);
1471
1472 aup = dev->priv;
1473
1474 /* Allocate the data buffers */
1475 /* Snooping works fine with eth on all au1xxx */
1476 aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
1477 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1478 &aup->dma_addr,
1479 0);
1480 if (!aup->vaddr) {
1481 free_netdev(dev);
1482 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1483 return NULL;
1484 }
1485
1486 /* aup->mac is the base address of the MAC's registers */
1487 aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
1488 /* Setup some variables for quick register address access */
1489 if (ioaddr == iflist[0].base_addr)
1490 {
1491 /* check env variables first */
1492 if (!get_ethernet_addr(ethaddr)) {
1493 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1494 } else {
1495 /* Check command line */
1496 argptr = prom_getcmdline();
1497 if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
1498 printk(KERN_INFO "%s: No mac address found\n",
1499 dev->name);
1500 /* use the hard coded mac addresses */
1501 } else {
1502 str2eaddr(ethaddr, pmac + strlen("ethaddr="));
1503 memcpy(au1000_mac_addr, ethaddr,
1504 sizeof(au1000_mac_addr));
1505 }
1506 }
1507 aup->enable = (volatile u32 *)
1508 ((unsigned long)iflist[0].macen_addr);
1509 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1510 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1511 aup->mac_id = 0;
1512 au_macs[0] = aup;
1513 }
1514 else
1515 if (ioaddr == iflist[1].base_addr)
1516 {
1517 aup->enable = (volatile u32 *)
1518 ((unsigned long)iflist[1].macen_addr);
1519 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1520 dev->dev_addr[4] += 0x10;
1521 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1522 aup->mac_id = 1;
1523 au_macs[1] = aup;
1524 }
1525 else
1526 {
1527 printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
1528 }
1529
1530 /* bring the device out of reset, otherwise probing the mii
1531 * will hang */
1532 *aup->enable = MAC_EN_CLOCK_ENABLE;
1533 au_sync_delay(2);
1534 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1535 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1536 au_sync_delay(2);
1537
1538 aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
1539 if (!aup->mii) {
1540 printk(KERN_ERR "%s: out of memory\n", dev->name);
1541 goto err_out;
1542 }
1543 aup->mii->next = NULL;
1544 aup->mii->chip_info = NULL;
1545 aup->mii->status = 0;
1546 aup->mii->mii_control_reg = 0;
1547 aup->mii->mii_data_reg = 0;
1548
1549 if (mii_probe(dev) != 0) {
1550 goto err_out;
1551 }
1552
1553 pDBfree = NULL;
1554 /* setup the data buffer descriptors and attach a buffer to each one */
1555 pDB = aup->db;
1556 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1557 pDB->pnext = pDBfree;
1558 pDBfree = pDB;
1559 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1560 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1561 pDB++;
1562 }
1563 aup->pDBfree = pDBfree;
1564
1565 for (i = 0; i < NUM_RX_DMA; i++) {
1566 pDB = GetFreeDB(aup);
1567 if (!pDB) {
1568 goto err_out;
1569 }
1570 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1571 aup->rx_db_inuse[i] = pDB;
1572 }
1573 for (i = 0; i < NUM_TX_DMA; i++) {
1574 pDB = GetFreeDB(aup);
1575 if (!pDB) {
1576 goto err_out;
1577 }
1578 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1579 aup->tx_dma_ring[i]->len = 0;
1580 aup->tx_db_inuse[i] = pDB;
1581 }
1582
1583 spin_lock_init(&aup->lock);
1584 dev->base_addr = ioaddr;
1585 dev->irq = irq;
1586 dev->open = au1000_open;
1587 dev->hard_start_xmit = au1000_tx;
1588 dev->stop = au1000_close;
1589 dev->get_stats = au1000_get_stats;
1590 dev->set_multicast_list = &set_rx_mode;
1591 dev->do_ioctl = &au1000_ioctl;
1592 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1593 dev->set_config = &au1000_set_config;
1594 dev->tx_timeout = au1000_tx_timeout;
1595 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1596
1597 /*
1598 * The boot code uses the ethernet controller, so reset it to start
1599 * fresh. au1000_init() expects that the device is in reset state.
1600 */
1601 reset_mac(dev);
1602
1603 return dev;
1604
1605err_out:
1606 /* here we should have a valid dev plus aup-> register addresses
 1607 * so we can reset the mac properly. */
1608 reset_mac(dev);
 1609 kfree(aup->mii);
1610 for (i = 0; i < NUM_RX_DMA; i++) {
1611 if (aup->rx_db_inuse[i])
1612 ReleaseDB(aup, aup->rx_db_inuse[i]);
1613 }
1614 for (i = 0; i < NUM_TX_DMA; i++) {
1615 if (aup->tx_db_inuse[i])
1616 ReleaseDB(aup, aup->tx_db_inuse[i]);
1617 }
1618 dma_free_noncoherent(NULL,
1619 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1620 (void *)aup->vaddr,
1621 aup->dma_addr);
1622 unregister_netdev(dev);
1623 free_netdev(dev);
1624 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1625 return NULL;
1626}
1627
1628/*
1629 * Initialize the interface.
1630 *
1631 * When the device powers up, the clocks are disabled and the
1632 * mac is in reset state. When the interface is closed, we
1633 * do the same -- reset the device and disable the clocks to
1634 * conserve power. Thus, whenever au1000_init() is called,
1635 * the device should already be in reset state.
1636 */
1637static int au1000_init(struct net_device *dev)
1638{
1639 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1640 u32 flags;
1641 int i;
1642 u32 control;
1643 u16 link, speed;
1644
1645 if (au1000_debug > 4)
1646 printk("%s: au1000_init\n", dev->name);
1647
1648 spin_lock_irqsave(&aup->lock, flags);
1649
1650 /* bring the device out of reset */
1651 *aup->enable = MAC_EN_CLOCK_ENABLE;
1652 au_sync_delay(2);
1653 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1654 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1655 au_sync_delay(20);
1656
1657 aup->mac->control = 0;
1658 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
1659 aup->tx_tail = aup->tx_head;
1660 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
1661
1662 aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
1663 aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
1664 dev->dev_addr[1]<<8 | dev->dev_addr[0];
1665
1666 for (i = 0; i < NUM_RX_DMA; i++) {
1667 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
1668 }
1669 au_sync();
1670
1671 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1672 control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
1673#ifndef CONFIG_CPU_LITTLE_ENDIAN
1674 control |= MAC_BIG_ENDIAN;
1675#endif
1676 if (link && (dev->if_port == IF_PORT_100BASEFX)) {
1677 control |= MAC_FULL_DUPLEX;
1678 }
1679
1680 aup->mac->control = control;
1681 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1682 au_sync();
1683
1684 spin_unlock_irqrestore(&aup->lock, flags);
1685 return 0;
1686}
1687
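/*
 * Periodic (1 Hz) link monitor: poll the PHY for link and speed,
 * update the carrier state, and reconfigure the MAC for half or
 * full duplex when the negotiated port type changes.
 */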
1688static void au1000_timer(unsigned long data)
1689{
1690 struct net_device *dev = (struct net_device *)data;
1691 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1692 unsigned char if_port;
1693 u16 link, speed;
1694
1695 if (!dev) {
1696 /* fatal error, don't restart the timer */
1697 printk(KERN_ERR "au1000_timer error: NULL dev\n");
1698 return;
1699 }
1700
1701 if_port = dev->if_port;
1702 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1703 if (link) {
 1704 if (!netif_carrier_ok(dev)) {
 1705 netif_carrier_on(dev);
1706 printk(KERN_INFO "%s: link up\n", dev->name);
1707 }
1708 }
1709 else {
 1710 if (netif_carrier_ok(dev)) {
 1711 netif_carrier_off(dev);
1712 dev->if_port = 0;
1713 printk(KERN_INFO "%s: link down\n", dev->name);
1714 }
1715 }
1716 }
1717
1718 if (link && (dev->if_port != if_port) &&
1719 (dev->if_port != IF_PORT_UNKNOWN)) {
1720 hard_stop(dev);
1721 if (dev->if_port == IF_PORT_100BASEFX) {
1722 printk(KERN_INFO "%s: going to full duplex\n",
1723 dev->name);
1724 aup->mac->control |= MAC_FULL_DUPLEX;
1725 au_sync_delay(1);
1726 }
1727 else {
1728 aup->mac->control &= ~MAC_FULL_DUPLEX;
1729 au_sync_delay(1);
1730 }
1731 enable_rx_tx(dev);
1732 }
1733
1734 aup->timer.expires = RUN_AT((1*HZ));
1735 aup->timer.data = (unsigned long)dev;
1736 aup->timer.function = &au1000_timer; /* timer handler */
1737 add_timer(&aup->timer);
1738
1739}
1740
1741static int au1000_open(struct net_device *dev)
1742{
1743 int retval;
1744 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1745
1746 if (au1000_debug > 4)
1747 printk("%s: open: dev=%p\n", dev->name, dev);
1748
1749 if ((retval = au1000_init(dev))) {
1750 printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
1751 free_irq(dev->irq, dev);
1752 return retval;
1753 }
1754 netif_start_queue(dev);
1755
1756 if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
1757 dev->name, dev))) {
1758 printk(KERN_ERR "%s: unable to get IRQ %d\n",
1759 dev->name, dev->irq);
1760 return retval;
1761 }
1762
1763 init_timer(&aup->timer); /* used in ioctl() */
1764 aup->timer.expires = RUN_AT((3*HZ));
1765 aup->timer.data = (unsigned long)dev;
1766 aup->timer.function = &au1000_timer; /* timer handler */
1767 add_timer(&aup->timer);
1768
1769 if (au1000_debug > 4)
1770 printk("%s: open: Initialization done.\n", dev->name);
1771
1772 return 0;
1773}
1774
1775static int au1000_close(struct net_device *dev)
1776{
1777 u32 flags;
1778 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1779
1780 if (au1000_debug > 4)
1781 printk("%s: close: dev=%p\n", dev->name, dev);
1782
1783 reset_mac(dev);
1784
1785 spin_lock_irqsave(&aup->lock, flags);
1786
1787 /* stop the device */
1788 netif_stop_queue(dev);
1789
1790 /* disable the interrupt */
1791 free_irq(dev->irq, dev);
1792 spin_unlock_irqrestore(&aup->lock, flags);
1793
1794 return 0;
1795}
1796
1797static void __exit au1000_cleanup_module(void)
1798{
1799 int i, j;
1800 struct net_device *dev;
1801 struct au1000_private *aup;
1802
1803 for (i = 0; i < num_ifs; i++) {
1804 dev = iflist[i].dev;
1805 if (dev) {
1806 aup = (struct au1000_private *) dev->priv;
1807 unregister_netdev(dev);
 1808 kfree(aup->mii);
1809 for (j = 0; j < NUM_RX_DMA; j++) {
1810 if (aup->rx_db_inuse[j])
1811 ReleaseDB(aup, aup->rx_db_inuse[j]);
1812 }
1813 for (j = 0; j < NUM_TX_DMA; j++) {
1814 if (aup->tx_db_inuse[j])
1815 ReleaseDB(aup, aup->tx_db_inuse[j]);
1816 }
1817 dma_free_noncoherent(NULL,
1818 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1819 (void *)aup->vaddr,
1820 aup->dma_addr);
1821 free_netdev(dev);
1822 release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
1823 }
1824 }
1825}
1826
1827
1828static inline void
1829update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
1830{
1831 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1832 struct net_device_stats *ps = &aup->stats;
1833
1834 ps->tx_packets++;
1835 ps->tx_bytes += pkt_len;
1836
1837 if (status & TX_FRAME_ABORTED) {
1838 if (dev->if_port == IF_PORT_100BASEFX) {
1839 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1840 /* any other tx errors are only valid
1841 * in half duplex mode */
1842 ps->tx_errors++;
1843 ps->tx_aborted_errors++;
1844 }
1845 }
1846 else {
1847 ps->tx_errors++;
1848 ps->tx_aborted_errors++;
1849 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1850 ps->tx_carrier_errors++;
1851 }
1852 }
1853}
1854
1855
1856/*
1857 * Called from the interrupt service routine to acknowledge
1858 * the TX DONE bits. This is a must if the irq is setup as
1859 * edge triggered.
1860 */
1861static void au1000_tx_ack(struct net_device *dev)
1862{
1863 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1864 volatile tx_dma_t *ptxd;
1865
1866 ptxd = aup->tx_dma_ring[aup->tx_tail];
1867
1868 while (ptxd->buff_stat & TX_T_DONE) {
1869 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
1870 ptxd->buff_stat &= ~TX_T_DONE;
1871 ptxd->len = 0;
1872 au_sync();
1873
1874 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1875 ptxd = aup->tx_dma_ring[aup->tx_tail];
1876
1877 if (aup->tx_full) {
1878 aup->tx_full = 0;
1879 netif_wake_queue(dev);
1880 }
1881 }
1882}
1883
1884
1885/*
1886 * Au1000 transmit routine.
1887 */
1888static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1889{
1890 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1891 volatile tx_dma_t *ptxd;
1892 u32 buff_stat;
1893 db_dest_t *pDB;
1894 int i;
1895
1896 if (au1000_debug > 5)
1897 printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
1898 dev->name, (unsigned)aup, skb->len,
1899 skb->data, aup->tx_head);
1900
1901 ptxd = aup->tx_dma_ring[aup->tx_head];
1902 buff_stat = ptxd->buff_stat;
1903 if (buff_stat & TX_DMA_ENABLE) {
1904 /* We've wrapped around and the transmitter is still busy */
1905 netif_stop_queue(dev);
1906 aup->tx_full = 1;
1907 return 1;
1908 }
1909 else if (buff_stat & TX_T_DONE) {
1910 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
1911 ptxd->len = 0;
1912 }
1913
1914 if (aup->tx_full) {
1915 aup->tx_full = 0;
1916 netif_wake_queue(dev);
1917 }
1918
1919 pDB = aup->tx_db_inuse[aup->tx_head];
1920 memcpy((void *)pDB->vaddr, skb->data, skb->len);
1921 if (skb->len < ETH_ZLEN) {
1922 for (i=skb->len; i<ETH_ZLEN; i++) {
1923 ((char *)pDB->vaddr)[i] = 0;
1924 }
1925 ptxd->len = ETH_ZLEN;
1926 }
1927 else
1928 ptxd->len = skb->len;
1929
1930 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1931 au_sync();
1932 dev_kfree_skb(skb);
1933 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1934 dev->trans_start = jiffies;
1935 return 0;
1936}
1937
1938
1939static inline void update_rx_stats(struct net_device *dev, u32 status)
1940{
1941 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1942 struct net_device_stats *ps = &aup->stats;
1943
1944 ps->rx_packets++;
1945 if (status & RX_MCAST_FRAME)
1946 ps->multicast++;
1947
1948 if (status & RX_ERROR) {
1949 ps->rx_errors++;
1950 if (status & RX_MISSED_FRAME)
1951 ps->rx_missed_errors++;
 1952 if (status & (RX_OVERLEN | RX_LEN_ERROR))
1953 ps->rx_length_errors++;
1954 if (status & RX_CRC_ERROR)
1955 ps->rx_crc_errors++;
1956 if (status & RX_COLL)
1957 ps->collisions++;
1958 }
1959 else
1960 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1961
1962}
1963
1964/*
1965 * Au1000 receive routine.
1966 */
1967static int au1000_rx(struct net_device *dev)
1968{
1969 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1970 struct sk_buff *skb;
1971 volatile rx_dma_t *prxd;
1972 u32 buff_stat, status;
1973 db_dest_t *pDB;
1974 u32 frmlen;
1975
1976 if (au1000_debug > 5)
1977 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1978
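 /* Walk every descriptor the hardware has marked done. Each good
  * frame is copied into a freshly allocated skb, since the DMA
  * buffer itself is handed straight back to the MAC below. */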
1979 prxd = aup->rx_dma_ring[aup->rx_head];
1980 buff_stat = prxd->buff_stat;
1981 while (buff_stat & RX_T_DONE) {
1982 status = prxd->status;
1983 pDB = aup->rx_db_inuse[aup->rx_head];
1984 update_rx_stats(dev, status);
1985 if (!(status & RX_ERROR)) {
1986
1987 /* good frame */
1988 frmlen = (status & RX_FRAME_LEN_MASK);
1989 frmlen -= 4; /* Remove FCS */
1990 skb = dev_alloc_skb(frmlen + 2);
1991 if (skb == NULL) {
1992 printk(KERN_ERR
1993 "%s: Memory squeeze, dropping packet.\n",
1994 dev->name);
1995 aup->stats.rx_dropped++;
1996 continue;
1997 }
1998 skb->dev = dev;
1999 skb_reserve(skb, 2); /* 16 byte IP header align */
2000 eth_copy_and_sum(skb,
2001 (unsigned char *)pDB->vaddr, frmlen, 0);
2002 skb_put(skb, frmlen);
2003 skb->protocol = eth_type_trans(skb, dev);
2004 netif_rx(skb); /* pass the packet to upper layers */
2005 }
2006 else {
2007 if (au1000_debug > 4) {
2008 if (status & RX_MISSED_FRAME)
2009 printk("rx miss\n");
2010 if (status & RX_WDOG_TIMER)
2011 printk("rx wdog\n");
2012 if (status & RX_RUNT)
2013 printk("rx runt\n");
2014 if (status & RX_OVERLEN)
2015 printk("rx overlen\n");
2016 if (status & RX_COLL)
2017 printk("rx coll\n");
2018 if (status & RX_MII_ERROR)
2019 printk("rx mii error\n");
2020 if (status & RX_CRC_ERROR)
2021 printk("rx crc error\n");
2022 if (status & RX_LEN_ERROR)
2023 printk("rx len error\n");
2024 if (status & RX_U_CNTRL_FRAME)
2025 printk("rx u control frame\n");
2028 }
2029 }
2030 next:		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
2031 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
2032 au_sync();
2033
2034 /* next descriptor */
2035 prxd = aup->rx_dma_ring[aup->rx_head];
2036 buff_stat = prxd->buff_stat;
2037 dev->last_rx = jiffies;
2038 }
2039 return 0;
2040}
2041
2042
2043/*
2044 * Au1000 interrupt service routine.
2045 */
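/*
 * The handler does not decode a per-source status word here: on every
 * interrupt it simply drains the Rx ring and then acknowledges any
 * completed Tx descriptors.
 */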
2046static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2047{
2048 struct net_device *dev = (struct net_device *) dev_id;
2049
2050 if (dev == NULL) {
2051		printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
2052 return IRQ_RETVAL(1);
2053 }
2054
2055 /* Handle RX interrupts first to minimize chance of overrun */
2056
2057 au1000_rx(dev);
2058 au1000_tx_ack(dev);
2059 return IRQ_RETVAL(1);
2060}
2061
2062
2063/*
2064 * The Tx ring has been full longer than the watchdog timeout
2065 * value; the transmitter is presumably hung, so reset and reinitialize the MAC.
2066 */
2067static void au1000_tx_timeout(struct net_device *dev)
2068{
2069 printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
2070 reset_mac(dev);
2071 au1000_init(dev);
2072 dev->trans_start = jiffies;
2073 netif_wake_queue(dev);
2074}
2075
2076
2077static unsigned const ethernet_polynomial = 0x04c11db7U;
2078static inline u32 ether_crc(int length, unsigned char *data)
2079{
2080 int crc = -1;
2081
2082 while(--length >= 0) {
2083 unsigned char current_octet = *data++;
2084 int bit;
2085 for (bit = 0; bit < 8; bit++, current_octet >>= 1)
2086 crc = (crc << 1) ^
2087 ((crc < 0) ^ (current_octet & 1) ?
2088 ethernet_polynomial : 0);
2089 }
2090 return crc;
2091}
2092
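/*
 * The MAC's multicast filter is a 64-bit hash split across the
 * multi_hash_high/multi_hash_low registers.  The bit to set for a given
 * address is the top six bits of the CRC computed above, roughly
 * (assuming 32-bit longs, as on the Au1x00):
 *
 *	bit = ether_crc(ETH_ALEN, addr) >> 26;		   bit is 0..63
 *	mc_filter[bit / 32] |= 1 << (bit % 32);
 *
 * which is what set_rx_mode() below does via set_bit() on mc_filter[].
 */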
2093static void set_rx_mode(struct net_device *dev)
2094{
2095 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2096
2097 if (au1000_debug > 4)
2098 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
2099
2100 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2101 aup->mac->control |= MAC_PROMISCUOUS;
2102 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2103 } else if ((dev->flags & IFF_ALLMULTI) ||
2104 dev->mc_count > MULTICAST_FILTER_LIMIT) {
2105 aup->mac->control |= MAC_PASS_ALL_MULTI;
2106 aup->mac->control &= ~MAC_PROMISCUOUS;
2107 printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
2108 } else {
2109 int i;
2110 struct dev_mc_list *mclist;
2111 u32 mc_filter[2]; /* Multicast hash filter */
2112
2113 mc_filter[1] = mc_filter[0] = 0;
2114 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2115 i++, mclist = mclist->next) {
2116 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
2117 (long *)mc_filter);
2118 }
2119 aup->mac->multi_hash_high = mc_filter[1];
2120 aup->mac->multi_hash_low = mc_filter[0];
2121 aup->mac->control &= ~MAC_PROMISCUOUS;
2122 aup->mac->control |= MAC_HASH_MODE;
2123 }
2124}
2125
2126
2127static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2128{
2129 struct au1000_private *aup = (struct au1000_private *)dev->priv;
2130 u16 *data = (u16 *)&rq->ifr_ifru;
2131
2132 switch(cmd) {
2133 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2134 case SIOCGMIIPHY:
2135 if (!netif_running(dev)) return -EINVAL;
2136 data[0] = aup->phy_addr;
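		/* fall through and return the register contents as well */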
2137 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
2138 case SIOCGMIIREG:
2139 data[3] = mdio_read(dev, data[0], data[1]);
2140 return 0;
2141 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
2142 case SIOCSMIIREG:
2143 if (!capable(CAP_NET_ADMIN))
2144 return -EPERM;
2145 mdio_write(dev, data[0], data[1],data[2]);
2146 return 0;
2147 default:
2148 return -EOPNOTSUPP;
2149 }
2150
2151}
2152
2153
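/*
 * Media selection via SIOCSIFMAP: each case rewrites the PHY's MII
 * control register (forcing speed/duplex or re-enabling autonegotiation)
 * and marks the carrier down so the link-beat timer can bring it back up.
 */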
2154static int au1000_set_config(struct net_device *dev, struct ifmap *map)
2155{
2156 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2157 u16 control;
2158
2159 if (au1000_debug > 4) {
2160 printk("%s: set_config called: dev->if_port %d map->port %x\n",
2161 dev->name, dev->if_port, map->port);
2162 }
2163
2164 switch(map->port){
2165 case IF_PORT_UNKNOWN: /* use auto here */
2166 printk(KERN_INFO "%s: config phy for aneg\n",
2167 dev->name);
2168 dev->if_port = map->port;
2169 /* Link Down: the timer will bring it up */
2170 netif_carrier_off(dev);
2171
2172 /* read current control */
2173 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2174 control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
2175
2176 /* enable auto negotiation and reset the negotiation */
2177 mdio_write(dev, aup->phy_addr, MII_CONTROL,
2178 control | MII_CNTL_AUTO |
2179 MII_CNTL_RST_AUTO);
2180
2181 break;
2182
2183 case IF_PORT_10BASET: /* 10BaseT */
2184 printk(KERN_INFO "%s: config phy for 10BaseT\n",
2185 dev->name);
2186 dev->if_port = map->port;
2187
2188 /* Link Down: the timer will bring it up */
2189 netif_carrier_off(dev);
2190
2191 /* set Speed to 10Mbps, Half Duplex */
2192 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2193 control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
2194 MII_CNTL_FDX);
2195
2196 /* disable auto negotiation and force 10M/HD mode*/
2197 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2198 break;
2199
2200 case IF_PORT_100BASET: /* 100BaseT */
2201 case IF_PORT_100BASETX: /* 100BaseTx */
2202 printk(KERN_INFO "%s: config phy for 100BaseTX\n",
2203 dev->name);
2204 dev->if_port = map->port;
2205
2206 /* Link Down: the timer will bring it up */
2207 netif_carrier_off(dev);
2208
2209 /* set Speed to 100Mbps, Half Duplex */
2210 /* disable auto negotiation and enable 100MBit Mode */
2211 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2212 control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
2213 control |= MII_CNTL_F100;
2214 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2215 break;
2216
2217 case IF_PORT_100BASEFX: /* 100BaseFx */
2218 printk(KERN_INFO "%s: config phy for 100BaseFX\n",
2219 dev->name);
2220 dev->if_port = map->port;
2221
2222 /* Link Down: the timer will bring it up */
2223 netif_carrier_off(dev);
2224
2225 /* set Speed to 100Mbps, Full Duplex */
2226 /* disable auto negotiation and enable 100MBit Mode */
2227 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2228 control &= ~MII_CNTL_AUTO;
2229 control |= MII_CNTL_F100 | MII_CNTL_FDX;
2230 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2231 break;
2232 case IF_PORT_10BASE2: /* 10Base2 */
2233 case IF_PORT_AUI: /* AUI */
2234		/* These media types are not supported by this driver */
2235		printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
2236			dev->name);
2237		return -EOPNOTSUPP;
2239
2240 default:
2241		printk(KERN_ERR "%s: Invalid media selected\n",
2242			dev->name);
2243 return -EINVAL;
2244 }
2245 return 0;
2246}
2247
2248static struct net_device_stats *au1000_get_stats(struct net_device *dev)
2249{
2250 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2251
2252 if (au1000_debug > 4)
2253 printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
2254
2255 if (netif_device_present(dev)) {
2256 return &aup->stats;
2257 }
2258	return NULL;
2259}
2260
2261module_init(au1000_init_module);
2262module_exit(au1000_cleanup_module);