drivers/net/au1000_eth.c
1/*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001,2002,2003 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Author: MontaVista Software, Inc.
13 * ppopov@mvista.com or source@mvista.com
14 *
15 * ########################################################################
16 *
17 * This program is free software; you can distribute it and/or modify it
18 * under the terms of the GNU General Public License (Version 2) as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 * for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
29 *
30 * ########################################################################
31 *
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/string.h>
39#include <linux/timer.h>
40#include <linux/errno.h>
41#include <linux/in.h>
42#include <linux/ioport.h>
43#include <linux/bitops.h>
44#include <linux/slab.h>
45#include <linux/interrupt.h>
46#include <linux/pci.h>
47#include <linux/init.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/ethtool.h>
51#include <linux/mii.h>
52#include <linux/skbuff.h>
53#include <linux/delay.h>
54#include <asm/mipsregs.h>
55#include <asm/irq.h>
56#include <asm/io.h>
57#include <asm/processor.h>
58
59#include <asm/mach-au1x00/au1000.h>
60#include <asm/cpu.h>
61#include "au1000_eth.h"
62
63#ifdef AU1000_ETH_DEBUG
64static int au1000_debug = 5;
65#else
66static int au1000_debug = 3;
67#endif
68
69#define DRV_NAME "au1000eth"
70#define DRV_VERSION "1.5"
71#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
72#define DRV_DESC "Au1xxx on-chip Ethernet driver"
73
74MODULE_AUTHOR(DRV_AUTHOR);
75MODULE_DESCRIPTION(DRV_DESC);
76MODULE_LICENSE("GPL");
77
78// prototypes
79static void hard_stop(struct net_device *);
80static void enable_rx_tx(struct net_device *dev);
81static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
82static int au1000_init(struct net_device *);
83static int au1000_open(struct net_device *);
84static int au1000_close(struct net_device *);
85static int au1000_tx(struct sk_buff *, struct net_device *);
86static int au1000_rx(struct net_device *);
87static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
88static void au1000_tx_timeout(struct net_device *);
89static int au1000_set_config(struct net_device *dev, struct ifmap *map);
90static void set_rx_mode(struct net_device *);
91static struct net_device_stats *au1000_get_stats(struct net_device *);
92static inline void update_tx_stats(struct net_device *, u32, u32);
93static inline void update_rx_stats(struct net_device *, u32);
94static void au1000_timer(unsigned long);
95static int au1000_ioctl(struct net_device *, struct ifreq *, int);
96static int mdio_read(struct net_device *, int, int);
97static void mdio_write(struct net_device *, int, int, u16);
98static void dump_mii(struct net_device *dev, int phy_id);
99
100// externs
101extern void ack_rise_edge_irq(unsigned int);
102extern int get_ethernet_addr(char *ethernet_addr);
103extern void str2eaddr(unsigned char *ea, unsigned char *str);
104extern char * __init prom_getcmdline(void);
105
106/*
107 * Theory of operation
108 *
109 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
110 * There are four receive and four transmit descriptors. These
111 * descriptors are not in memory; rather, they are just a set of
112 * hardware registers.
113 *
114 * Since the Au1000 has a coherent data cache, the receive and
115 * transmit buffers are allocated from the KSEG0 segment. The
116 * hardware registers, however, are still mapped at KSEG1 to
 117 * make sure there are no out-of-order writes, and that all writes
118 * complete immediately.
119 */
120
121/* These addresses are only used if yamon doesn't tell us what
122 * the mac address is, and the mac address is not passed on the
123 * command line.
124 */
125static unsigned char au1000_mac_addr[6] __devinitdata = {
126 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
127};
128
129#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
130#define RUN_AT(x) (jiffies + (x))
131
132// For reading/writing 32-bit words from/to DMA memory
133#define cpu_to_dma32 cpu_to_be32
134#define dma32_to_cpu be32_to_cpu
135
136struct au1000_private *au_macs[NUM_ETH_INTERFACES];
137
138/* FIXME
139 * All of the PHY code really should be detached from the MAC
140 * code.
141 */
142
143/* Default advertise */
144#define GENMII_DEFAULT_ADVERTISE \
145 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
146 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
147 ADVERTISED_Autoneg
148
149#define GENMII_DEFAULT_FEATURES \
150 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
151 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
152 SUPPORTED_Autoneg
153
154int bcm_5201_init(struct net_device *dev, int phy_addr)
155{
156 s16 data;
157
158 /* Stop auto-negotiation */
159 data = mdio_read(dev, phy_addr, MII_CONTROL);
160 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
161
162 /* Set advertisement to 10/100 and Half/Full duplex
163 * (full capabilities) */
164 data = mdio_read(dev, phy_addr, MII_ANADV);
165 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
166 mdio_write(dev, phy_addr, MII_ANADV, data);
167
168 /* Restart auto-negotiation */
169 data = mdio_read(dev, phy_addr, MII_CONTROL);
170 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
171 mdio_write(dev, phy_addr, MII_CONTROL, data);
172
173 if (au1000_debug > 4)
174 dump_mii(dev, phy_addr);
175 return 0;
176}
177
178int bcm_5201_reset(struct net_device *dev, int phy_addr)
179{
180 s16 mii_control, timeout;
181
182 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
183 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
184 mdelay(1);
185 for (timeout = 100; timeout > 0; --timeout) {
186 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
187 if ((mii_control & MII_CNTL_RESET) == 0)
188 break;
189 mdelay(1);
190 }
191 if (mii_control & MII_CNTL_RESET) {
192 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
193 return -1;
194 }
195 return 0;
196}
197
198int
199bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
200{
201 u16 mii_data;
202 struct au1000_private *aup;
203
204 if (!dev) {
205 printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
206 return -1;
207 }
208 aup = (struct au1000_private *) dev->priv;
209
210 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
211 if (mii_data & MII_STAT_LINK) {
212 *link = 1;
213 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
214 if (mii_data & MII_AUX_100) {
215 if (mii_data & MII_AUX_FDX) {
216 *speed = IF_PORT_100BASEFX;
217 dev->if_port = IF_PORT_100BASEFX;
218 }
219 else {
220 *speed = IF_PORT_100BASETX;
221 dev->if_port = IF_PORT_100BASETX;
222 }
223 }
224 else {
225 *speed = IF_PORT_10BASET;
226 dev->if_port = IF_PORT_10BASET;
227 }
228
229 }
230 else {
231 *link = 0;
232 *speed = 0;
233 dev->if_port = IF_PORT_UNKNOWN;
234 }
235 return 0;
236}
237
238int lsi_80227_init(struct net_device *dev, int phy_addr)
239{
240 if (au1000_debug > 4)
241 printk("lsi_80227_init\n");
242
243 /* restart auto-negotiation */
244 mdio_write(dev, phy_addr, MII_CONTROL,
245 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
246 mdelay(1);
247
248 /* set up LEDs to correct display */
249#ifdef CONFIG_MIPS_MTX1
250 mdio_write(dev, phy_addr, 17, 0xff80);
251#else
252 mdio_write(dev, phy_addr, 17, 0xffc0);
253#endif
254
255 if (au1000_debug > 4)
256 dump_mii(dev, phy_addr);
257 return 0;
258}
259
260int lsi_80227_reset(struct net_device *dev, int phy_addr)
261{
262 s16 mii_control, timeout;
263
264 if (au1000_debug > 4) {
265 printk("lsi_80227_reset\n");
266 dump_mii(dev, phy_addr);
267 }
268
269 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
270 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
271 mdelay(1);
272 for (timeout = 100; timeout > 0; --timeout) {
273 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
274 if ((mii_control & MII_CNTL_RESET) == 0)
275 break;
276 mdelay(1);
277 }
278 if (mii_control & MII_CNTL_RESET) {
279 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
280 return -1;
281 }
282 return 0;
283}
284
285int
286lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
287{
288 u16 mii_data;
289 struct au1000_private *aup;
290
291 if (!dev) {
292 printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
293 return -1;
294 }
295 aup = (struct au1000_private *) dev->priv;
296
297 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
298 if (mii_data & MII_STAT_LINK) {
299 *link = 1;
300 mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
301 if (mii_data & MII_LSI_PHY_STAT_SPD) {
302 if (mii_data & MII_LSI_PHY_STAT_FDX) {
303 *speed = IF_PORT_100BASEFX;
304 dev->if_port = IF_PORT_100BASEFX;
305 }
306 else {
307 *speed = IF_PORT_100BASETX;
308 dev->if_port = IF_PORT_100BASETX;
309 }
310 }
311 else {
312 *speed = IF_PORT_10BASET;
313 dev->if_port = IF_PORT_10BASET;
314 }
315
316 }
317 else {
318 *link = 0;
319 *speed = 0;
320 dev->if_port = IF_PORT_UNKNOWN;
321 }
322 return 0;
323}
324
325int am79c901_init(struct net_device *dev, int phy_addr)
326{
327 printk("am79c901_init\n");
328 return 0;
329}
330
331int am79c901_reset(struct net_device *dev, int phy_addr)
332{
333 printk("am79c901_reset\n");
334 return 0;
335}
336
337int
338am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
339{
340 return 0;
341}
342
343int am79c874_init(struct net_device *dev, int phy_addr)
344{
345 s16 data;
346
 347 /* the 79c874 has quite similar bit assignments to the BCM5201 */
348 if (au1000_debug > 4)
349 printk("am79c847_init\n");
350
351 /* Stop auto-negotiation */
352 data = mdio_read(dev, phy_addr, MII_CONTROL);
353 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
354
355 /* Set advertisement to 10/100 and Half/Full duplex
356 * (full capabilities) */
357 data = mdio_read(dev, phy_addr, MII_ANADV);
358 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
359 mdio_write(dev, phy_addr, MII_ANADV, data);
360
361 /* Restart auto-negotiation */
362 data = mdio_read(dev, phy_addr, MII_CONTROL);
363 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
364
365 mdio_write(dev, phy_addr, MII_CONTROL, data);
366
367 if (au1000_debug > 4) dump_mii(dev, phy_addr);
368 return 0;
369}
370
371int am79c874_reset(struct net_device *dev, int phy_addr)
372{
373 s16 mii_control, timeout;
374
375 if (au1000_debug > 4)
376 printk("am79c874_reset\n");
377
378 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
379 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
380 mdelay(1);
381 for (timeout = 100; timeout > 0; --timeout) {
382 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
383 if ((mii_control & MII_CNTL_RESET) == 0)
384 break;
385 mdelay(1);
386 }
387 if (mii_control & MII_CNTL_RESET) {
388 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
389 return -1;
390 }
391 return 0;
392}
393
394int
395am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
396{
397 u16 mii_data;
398 struct au1000_private *aup;
399
400 // printk("am79c874_status\n");
401 if (!dev) {
402 printk(KERN_ERR "am79c874_status error: NULL dev\n");
403 return -1;
404 }
405
406 aup = (struct au1000_private *) dev->priv;
407 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
408
409 if (mii_data & MII_STAT_LINK) {
410 *link = 1;
411 mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
412 if (mii_data & MII_AMD_PHY_STAT_SPD) {
413 if (mii_data & MII_AMD_PHY_STAT_FDX) {
414 *speed = IF_PORT_100BASEFX;
415 dev->if_port = IF_PORT_100BASEFX;
416 }
417 else {
418 *speed = IF_PORT_100BASETX;
419 dev->if_port = IF_PORT_100BASETX;
420 }
421 }
422 else {
423 *speed = IF_PORT_10BASET;
424 dev->if_port = IF_PORT_10BASET;
425 }
426
427 }
428 else {
429 *link = 0;
430 *speed = 0;
431 dev->if_port = IF_PORT_UNKNOWN;
432 }
433 return 0;
434}
435
436int lxt971a_init(struct net_device *dev, int phy_addr)
437{
438 if (au1000_debug > 4)
439 printk("lxt971a_init\n");
440
441 /* restart auto-negotiation */
442 mdio_write(dev, phy_addr, MII_CONTROL,
443 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
444
445 /* set up LEDs to correct display */
446 mdio_write(dev, phy_addr, 20, 0x0422);
447
448 if (au1000_debug > 4)
449 dump_mii(dev, phy_addr);
450 return 0;
451}
452
453int lxt971a_reset(struct net_device *dev, int phy_addr)
454{
455 s16 mii_control, timeout;
456
457 if (au1000_debug > 4) {
458 printk("lxt971a_reset\n");
459 dump_mii(dev, phy_addr);
460 }
461
462 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
463 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
464 mdelay(1);
465 for (timeout = 100; timeout > 0; --timeout) {
466 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
467 if ((mii_control & MII_CNTL_RESET) == 0)
468 break;
469 mdelay(1);
470 }
471 if (mii_control & MII_CNTL_RESET) {
472 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
473 return -1;
474 }
475 return 0;
476}
477
478int
479lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
480{
481 u16 mii_data;
482 struct au1000_private *aup;
483
484 if (!dev) {
485 printk(KERN_ERR "lxt971a_status error: NULL dev\n");
486 return -1;
487 }
488 aup = (struct au1000_private *) dev->priv;
489
490 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
491 if (mii_data & MII_STAT_LINK) {
492 *link = 1;
493 mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
494 if (mii_data & MII_INTEL_PHY_STAT_SPD) {
495 if (mii_data & MII_INTEL_PHY_STAT_FDX) {
496 *speed = IF_PORT_100BASEFX;
497 dev->if_port = IF_PORT_100BASEFX;
498 }
499 else {
500 *speed = IF_PORT_100BASETX;
501 dev->if_port = IF_PORT_100BASETX;
502 }
503 }
504 else {
505 *speed = IF_PORT_10BASET;
506 dev->if_port = IF_PORT_10BASET;
507 }
508
509 }
510 else {
511 *link = 0;
512 *speed = 0;
513 dev->if_port = IF_PORT_UNKNOWN;
514 }
515 return 0;
516}
517
518int ks8995m_init(struct net_device *dev, int phy_addr)
519{
520 s16 data;
521
522// printk("ks8995m_init\n");
523 /* Stop auto-negotiation */
524 data = mdio_read(dev, phy_addr, MII_CONTROL);
525 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
526
527 /* Set advertisement to 10/100 and Half/Full duplex
528 * (full capabilities) */
529 data = mdio_read(dev, phy_addr, MII_ANADV);
530 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
531 mdio_write(dev, phy_addr, MII_ANADV, data);
532
533 /* Restart auto-negotiation */
534 data = mdio_read(dev, phy_addr, MII_CONTROL);
535 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
536 mdio_write(dev, phy_addr, MII_CONTROL, data);
537
538 if (au1000_debug > 4) dump_mii(dev, phy_addr);
539
540 return 0;
541}
542
543int ks8995m_reset(struct net_device *dev, int phy_addr)
544{
545 s16 mii_control, timeout;
546
547// printk("ks8995m_reset\n");
548 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
549 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
550 mdelay(1);
551 for (timeout = 100; timeout > 0; --timeout) {
552 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
553 if ((mii_control & MII_CNTL_RESET) == 0)
554 break;
555 mdelay(1);
556 }
557 if (mii_control & MII_CNTL_RESET) {
558 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
559 return -1;
560 }
561 return 0;
562}
563
564int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
565{
566 u16 mii_data;
567 struct au1000_private *aup;
568
569 if (!dev) {
570 printk(KERN_ERR "ks8995m_status error: NULL dev\n");
571 return -1;
572 }
573 aup = (struct au1000_private *) dev->priv;
574
575 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
576 if (mii_data & MII_STAT_LINK) {
577 *link = 1;
578 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
579 if (mii_data & MII_AUX_100) {
580 if (mii_data & MII_AUX_FDX) {
581 *speed = IF_PORT_100BASEFX;
582 dev->if_port = IF_PORT_100BASEFX;
583 }
584 else {
585 *speed = IF_PORT_100BASETX;
586 dev->if_port = IF_PORT_100BASETX;
587 }
588 }
589 else {
590 *speed = IF_PORT_10BASET;
591 dev->if_port = IF_PORT_10BASET;
592 }
593
594 }
595 else {
596 *link = 0;
597 *speed = 0;
598 dev->if_port = IF_PORT_UNKNOWN;
599 }
600 return 0;
601}
602
603int
604smsc_83C185_init (struct net_device *dev, int phy_addr)
605{
606 s16 data;
607
608 if (au1000_debug > 4)
609 printk("smsc_83C185_init\n");
610
611 /* Stop auto-negotiation */
612 data = mdio_read(dev, phy_addr, MII_CONTROL);
613 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
614
615 /* Set advertisement to 10/100 and Half/Full duplex
616 * (full capabilities) */
617 data = mdio_read(dev, phy_addr, MII_ANADV);
618 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
619 mdio_write(dev, phy_addr, MII_ANADV, data);
620
621 /* Restart auto-negotiation */
622 data = mdio_read(dev, phy_addr, MII_CONTROL);
623 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
624
625 mdio_write(dev, phy_addr, MII_CONTROL, data);
626
627 if (au1000_debug > 4) dump_mii(dev, phy_addr);
628 return 0;
629}
630
631int
632smsc_83C185_reset (struct net_device *dev, int phy_addr)
633{
634 s16 mii_control, timeout;
635
636 if (au1000_debug > 4)
637 printk("smsc_83C185_reset\n");
638
639 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
640 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
641 mdelay(1);
642 for (timeout = 100; timeout > 0; --timeout) {
643 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
644 if ((mii_control & MII_CNTL_RESET) == 0)
645 break;
646 mdelay(1);
647 }
648 if (mii_control & MII_CNTL_RESET) {
649 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
650 return -1;
651 }
652 return 0;
653}
654
655int
656smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
657{
658 u16 mii_data;
659 struct au1000_private *aup;
660
661 if (!dev) {
662 printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
663 return -1;
664 }
665
666 aup = (struct au1000_private *) dev->priv;
667 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
668
669 if (mii_data & MII_STAT_LINK) {
670 *link = 1;
671 mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
672 if (mii_data & (1<<3)) {
673 if (mii_data & (1<<4)) {
674 *speed = IF_PORT_100BASEFX;
675 dev->if_port = IF_PORT_100BASEFX;
676 }
677 else {
678 *speed = IF_PORT_100BASETX;
679 dev->if_port = IF_PORT_100BASETX;
680 }
681 }
682 else {
683 *speed = IF_PORT_10BASET;
684 dev->if_port = IF_PORT_10BASET;
685 }
686 }
687 else {
688 *link = 0;
689 *speed = 0;
690 dev->if_port = IF_PORT_UNKNOWN;
691 }
692 return 0;
693}
694
695
696#ifdef CONFIG_MIPS_BOSPORUS
697int stub_init(struct net_device *dev, int phy_addr)
698{
699 //printk("PHY stub_init\n");
700 return 0;
701}
702
703int stub_reset(struct net_device *dev, int phy_addr)
704{
705 //printk("PHY stub_reset\n");
706 return 0;
707}
708
709int
710stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
711{
712 //printk("PHY stub_status\n");
713 *link = 1;
714 /* hmmm, revisit */
715 *speed = IF_PORT_100BASEFX;
716 dev->if_port = IF_PORT_100BASEFX;
717 return 0;
718}
719#endif
720
721struct phy_ops bcm_5201_ops = {
722 bcm_5201_init,
723 bcm_5201_reset,
724 bcm_5201_status,
725};
726
727struct phy_ops am79c874_ops = {
728 am79c874_init,
729 am79c874_reset,
730 am79c874_status,
731};
732
733struct phy_ops am79c901_ops = {
734 am79c901_init,
735 am79c901_reset,
736 am79c901_status,
737};
738
739struct phy_ops lsi_80227_ops = {
740 lsi_80227_init,
741 lsi_80227_reset,
742 lsi_80227_status,
743};
744
745struct phy_ops lxt971a_ops = {
746 lxt971a_init,
747 lxt971a_reset,
748 lxt971a_status,
749};
750
751struct phy_ops ks8995m_ops = {
752 ks8995m_init,
753 ks8995m_reset,
754 ks8995m_status,
755};
756
757struct phy_ops smsc_83C185_ops = {
758 smsc_83C185_init,
759 smsc_83C185_reset,
760 smsc_83C185_status,
761};
762
763#ifdef CONFIG_MIPS_BOSPORUS
764struct phy_ops stub_ops = {
765 stub_init,
766 stub_reset,
767 stub_status,
768};
769#endif
770
771static struct mii_chip_info {
772 const char * name;
773 u16 phy_id0;
774 u16 phy_id1;
775 struct phy_ops *phy_ops;
776 int dual_phy;
777} mii_chip_table[] = {
778 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
779 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
780 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
7f553e3d 781 {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
1da177e4
LT
782 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
783 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
784 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
785 {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
786 {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
787 {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
788#ifdef CONFIG_MIPS_BOSPORUS
789 {"Stub", 0x1234, 0x5678, &stub_ops },
790#endif
791 {0,},
792};
793
794static int mdio_read(struct net_device *dev, int phy_id, int reg)
795{
796 struct au1000_private *aup = (struct au1000_private *) dev->priv;
797 volatile u32 *mii_control_reg;
798 volatile u32 *mii_data_reg;
799 u32 timedout = 20;
800 u32 mii_control;
801
802 #ifdef CONFIG_BCM5222_DUAL_PHY
803 /* First time we probe, it's for the mac0 phy.
804 * Since we haven't determined yet that we have a dual phy,
805 * aup->mii->mii_control_reg won't be setup and we'll
806 * default to the else statement.
807 * By the time we probe for the mac1 phy, the mii_control_reg
808 * will be setup to be the address of the mac0 phy control since
809 * both phys are controlled through mac0.
810 */
811 if (aup->mii && aup->mii->mii_control_reg) {
812 mii_control_reg = aup->mii->mii_control_reg;
813 mii_data_reg = aup->mii->mii_data_reg;
814 }
815 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
816 /* assume both phys are controlled through mac0 */
817 mii_control_reg = au_macs[0]->mii->mii_control_reg;
818 mii_data_reg = au_macs[0]->mii->mii_data_reg;
819 }
820 else
821 #endif
822 {
823 /* default control and data reg addresses */
824 mii_control_reg = &aup->mac->mii_control;
825 mii_data_reg = &aup->mac->mii_data;
826 }
827
828 while (*mii_control_reg & MAC_MII_BUSY) {
829 mdelay(1);
830 if (--timedout == 0) {
831 printk(KERN_ERR "%s: read_MII busy timeout!!\n",
832 dev->name);
833 return -1;
834 }
835 }
836
837 mii_control = MAC_SET_MII_SELECT_REG(reg) |
838 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
839
840 *mii_control_reg = mii_control;
841
842 timedout = 20;
843 while (*mii_control_reg & MAC_MII_BUSY) {
844 mdelay(1);
845 if (--timedout == 0) {
846 printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
847 dev->name);
848 return -1;
849 }
850 }
851 return (int)*mii_data_reg;
852}
853
854static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
855{
856 struct au1000_private *aup = (struct au1000_private *) dev->priv;
857 volatile u32 *mii_control_reg;
858 volatile u32 *mii_data_reg;
859 u32 timedout = 20;
860 u32 mii_control;
861
862 #ifdef CONFIG_BCM5222_DUAL_PHY
863 if (aup->mii && aup->mii->mii_control_reg) {
864 mii_control_reg = aup->mii->mii_control_reg;
865 mii_data_reg = aup->mii->mii_data_reg;
866 }
867 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
868 /* assume both phys are controlled through mac0 */
869 mii_control_reg = au_macs[0]->mii->mii_control_reg;
870 mii_data_reg = au_macs[0]->mii->mii_data_reg;
871 }
872 else
873 #endif
874 {
875 /* default control and data reg addresses */
876 mii_control_reg = &aup->mac->mii_control;
877 mii_data_reg = &aup->mac->mii_data;
878 }
879
880 while (*mii_control_reg & MAC_MII_BUSY) {
881 mdelay(1);
882 if (--timedout == 0) {
883 printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
884 dev->name);
885 return;
886 }
887 }
888
889 mii_control = MAC_SET_MII_SELECT_REG(reg) |
890 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
891
892 *mii_data_reg = value;
893 *mii_control_reg = mii_control;
894}
895
896
897static void dump_mii(struct net_device *dev, int phy_id)
898{
899 int i, val;
900
901 for (i = 0; i < 7; i++) {
902 if ((val = mdio_read(dev, phy_id, i)) >= 0)
903 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
904 }
905 for (i = 16; i < 25; i++) {
906 if ((val = mdio_read(dev, phy_id, i)) >= 0)
907 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
908 }
909}
910
911static int mii_probe (struct net_device * dev)
912{
913 struct au1000_private *aup = (struct au1000_private *) dev->priv;
914 int phy_addr;
915#ifdef CONFIG_MIPS_BOSPORUS
916 int phy_found=0;
917#endif
918
919 /* search for total of 32 possible mii phy addresses */
920 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
921 u16 mii_status;
922 u16 phy_id0, phy_id1;
923 int i;
924
925 #ifdef CONFIG_BCM5222_DUAL_PHY
926 /* Mask the already found phy, try next one */
927 if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
928 if (au_macs[0]->phy_addr == phy_addr)
929 continue;
930 }
931 #endif
932
933 mii_status = mdio_read(dev, phy_addr, MII_STATUS);
934 if (mii_status == 0xffff || mii_status == 0x0000)
 935 /* the mii is not accessible, try the next one */
936 continue;
937
938 phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
939 phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
940
941 /* search our mii table for the current mii */
942 for (i = 0; mii_chip_table[i].phy_id1; i++) {
943 if (phy_id0 == mii_chip_table[i].phy_id0 &&
944 phy_id1 == mii_chip_table[i].phy_id1) {
945 struct mii_phy * mii_phy = aup->mii;
946
947 printk(KERN_INFO "%s: %s at phy address %d\n",
948 dev->name, mii_chip_table[i].name,
949 phy_addr);
950#ifdef CONFIG_MIPS_BOSPORUS
951 phy_found = 1;
952#endif
953 mii_phy->chip_info = mii_chip_table+i;
954 aup->phy_addr = phy_addr;
955 aup->want_autoneg = 1;
956 aup->phy_ops = mii_chip_table[i].phy_ops;
957 aup->phy_ops->phy_init(dev,phy_addr);
958
959 // Check for dual-phy and then store required
960 // values and set indicators. We need to do
961 // this now since mdio_{read,write} need the
962 // control and data register addresses.
963 #ifdef CONFIG_BCM5222_DUAL_PHY
964 if ( mii_chip_table[i].dual_phy) {
965
966 /* assume both phys are controlled
967 * through MAC0. Board specific? */
968
969 /* sanity check */
970 if (!au_macs[0] || !au_macs[0]->mii)
971 return -1;
972 aup->mii->mii_control_reg = (u32 *)
973 &au_macs[0]->mac->mii_control;
974 aup->mii->mii_data_reg = (u32 *)
975 &au_macs[0]->mac->mii_data;
976 }
977 #endif
978 goto found;
979 }
980 }
981 }
982found:
983
984#ifdef CONFIG_MIPS_BOSPORUS
985 /* This is a workaround for the Micrel/Kendin 5 port switch
986 The second MAC doesn't see a PHY connected... so we need to
987 trick it into thinking we have one.
988
989 If this kernel is run on another Au1500 development board
990 the stub will be found as well as the actual PHY. However,
991 the last found PHY will be used... usually at Addr 31 (Db1500).
992 */
993 if ( (!phy_found) )
994 {
995 u16 phy_id0, phy_id1;
996 int i;
997
998 phy_id0 = 0x1234;
999 phy_id1 = 0x5678;
1000
1001 /* search our mii table for the current mii */
1002 for (i = 0; mii_chip_table[i].phy_id1; i++) {
1003 if (phy_id0 == mii_chip_table[i].phy_id0 &&
1004 phy_id1 == mii_chip_table[i].phy_id1) {
1005 struct mii_phy * mii_phy;
1006
1007 printk(KERN_INFO "%s: %s at phy address %d\n",
1008 dev->name, mii_chip_table[i].name,
1009 phy_addr);
1010 mii_phy = kmalloc(sizeof(struct mii_phy),
1011 GFP_KERNEL);
1012 if (mii_phy) {
1013 mii_phy->chip_info = mii_chip_table+i;
1014 aup->phy_addr = phy_addr;
1015 mii_phy->next = aup->mii;
1016 aup->phy_ops =
1017 mii_chip_table[i].phy_ops;
1018 aup->mii = mii_phy;
1019 aup->phy_ops->phy_init(dev,phy_addr);
1020 } else {
1021 printk(KERN_ERR "%s: out of memory\n",
1022 dev->name);
1023 return -1;
1024 }
1025 mii_phy->chip_info = mii_chip_table+i;
1026 aup->phy_addr = phy_addr;
1027 aup->phy_ops = mii_chip_table[i].phy_ops;
1028 aup->phy_ops->phy_init(dev,phy_addr);
1029 break;
1030 }
1031 }
1032 }
1033 if (aup->mac_id == 0) {
1034 /* the Bosporus phy responds to addresses 0-5 but
1035 * 5 is the correct one.
1036 */
1037 aup->phy_addr = 5;
1038 }
1039#endif
1040
1041 if (aup->mii->chip_info == NULL) {
 1042 printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
1043 dev->name);
1044 return -1;
1045 }
1046
1047 printk(KERN_INFO "%s: Using %s as default\n",
1048 dev->name, aup->mii->chip_info->name);
1049
1050 return 0;
1051}
1052
1053
1054/*
1055 * Buffer allocation/deallocation routines. The buffer descriptor returned
1056 * has the virtual and dma address of a buffer suitable for
 1057 * both receive and transmit operations.
1058 */
1059static db_dest_t *GetFreeDB(struct au1000_private *aup)
1060{
1061 db_dest_t *pDB;
1062 pDB = aup->pDBfree;
1063
1064 if (pDB) {
1065 aup->pDBfree = pDB->pnext;
1066 }
1067 return pDB;
1068}
1069
1070void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
1071{
1072 db_dest_t *pDBfree = aup->pDBfree;
1073 if (pDBfree)
1074 pDBfree->pnext = pDB;
1075 aup->pDBfree = pDB;
1076}
1077
1078static void enable_rx_tx(struct net_device *dev)
1079{
1080 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1081
1082 if (au1000_debug > 4)
1083 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
1084
1085 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
1086 au_sync_delay(10);
1087}
1088
1089static void hard_stop(struct net_device *dev)
1090{
1091 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1092
1093 if (au1000_debug > 4)
1094 printk(KERN_INFO "%s: hard stop\n", dev->name);
1095
1096 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
1097 au_sync_delay(10);
1098}
1099
1100
1101static void reset_mac(struct net_device *dev)
1102{
1103 int i;
1104 u32 flags;
1105 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1106
1107 if (au1000_debug > 4)
1108 printk(KERN_INFO "%s: reset mac, aup %x\n",
1109 dev->name, (unsigned)aup);
1110
1111 spin_lock_irqsave(&aup->lock, flags);
 1112 if (aup->timer.function == &au1000_timer) { /* check if timer initialized */
1113 del_timer(&aup->timer);
1114 }
1115
1116 hard_stop(dev);
1117 #ifdef CONFIG_BCM5222_DUAL_PHY
1118 if (aup->mac_id != 0) {
1119 #endif
1120 /* If BCM5222, we can't leave MAC0 in reset because then
1121 * we can't access the dual phy for ETH1 */
1122 *aup->enable = MAC_EN_CLOCK_ENABLE;
1123 au_sync_delay(2);
1124 *aup->enable = 0;
1125 au_sync_delay(2);
1126 #ifdef CONFIG_BCM5222_DUAL_PHY
1127 }
1128 #endif
1129 aup->tx_full = 0;
1130 for (i = 0; i < NUM_RX_DMA; i++) {
1131 /* reset control bits */
1132 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
1133 }
1134 for (i = 0; i < NUM_TX_DMA; i++) {
1135 /* reset control bits */
1136 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
1137 }
1138 spin_unlock_irqrestore(&aup->lock, flags);
1139}
1140
1141
1142/*
1143 * Setup the receive and transmit "rings". These pointers are the addresses
1144 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
1145 * these are not descriptors sitting in memory.
1146 */
1147static void
1148setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
1149{
1150 int i;
1151
1152 for (i = 0; i < NUM_RX_DMA; i++) {
1153 aup->rx_dma_ring[i] =
1154 (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
1155 }
1156 for (i = 0; i < NUM_TX_DMA; i++) {
1157 aup->tx_dma_ring[i] =
1158 (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
1159 }
1160}
1161
1162static struct {
1163 int port;
1164 u32 base_addr;
1165 u32 macen_addr;
1166 int irq;
1167 struct net_device *dev;
1168} iflist[2];
1169
1170static int num_ifs;
1171
1172/*
 1173 * Setup the base address and interrupt of the Au1xxx ethernet macs
1174 * based on cpu type and whether the interface is enabled in sys_pinfunc
1175 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1176 */
1177static int __init au1000_init_module(void)
1178{
1179 struct cpuinfo_mips *c = &current_cpu_data;
1180 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1181 struct net_device *dev;
1182 int i, found_one = 0;
1183
1184 switch (c->cputype) {
1185#ifdef CONFIG_SOC_AU1000
1186 case CPU_AU1000:
1187 num_ifs = 2 - ni;
1188 iflist[0].base_addr = AU1000_ETH0_BASE;
1189 iflist[1].base_addr = AU1000_ETH1_BASE;
1190 iflist[0].macen_addr = AU1000_MAC0_ENABLE;
1191 iflist[1].macen_addr = AU1000_MAC1_ENABLE;
1192 iflist[0].irq = AU1000_MAC0_DMA_INT;
1193 iflist[1].irq = AU1000_MAC1_DMA_INT;
1194 break;
1195#endif
1196#ifdef CONFIG_SOC_AU1100
1197 case CPU_AU1100:
1198 num_ifs = 1 - ni;
1199 iflist[0].base_addr = AU1100_ETH0_BASE;
1200 iflist[0].macen_addr = AU1100_MAC0_ENABLE;
1201 iflist[0].irq = AU1100_MAC0_DMA_INT;
1202 break;
1203#endif
1204#ifdef CONFIG_SOC_AU1500
1205 case CPU_AU1500:
1206 num_ifs = 2 - ni;
1207 iflist[0].base_addr = AU1500_ETH0_BASE;
1208 iflist[1].base_addr = AU1500_ETH1_BASE;
1209 iflist[0].macen_addr = AU1500_MAC0_ENABLE;
1210 iflist[1].macen_addr = AU1500_MAC1_ENABLE;
1211 iflist[0].irq = AU1500_MAC0_DMA_INT;
1212 iflist[1].irq = AU1500_MAC1_DMA_INT;
1213 break;
1214#endif
1215#ifdef CONFIG_SOC_AU1550
1216 case CPU_AU1550:
1217 num_ifs = 2 - ni;
1218 iflist[0].base_addr = AU1550_ETH0_BASE;
1219 iflist[1].base_addr = AU1550_ETH1_BASE;
1220 iflist[0].macen_addr = AU1550_MAC0_ENABLE;
1221 iflist[1].macen_addr = AU1550_MAC1_ENABLE;
1222 iflist[0].irq = AU1550_MAC0_DMA_INT;
1223 iflist[1].irq = AU1550_MAC1_DMA_INT;
1224 break;
1225#endif
1226 default:
1227 num_ifs = 0;
1228 }
1229 for(i = 0; i < num_ifs; i++) {
1230 dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
1231 iflist[i].dev = dev;
1232 if (dev)
1233 found_one++;
1234 }
1235 if (!found_one)
1236 return -ENODEV;
1237 return 0;
1238}
1239
1240static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
1241{
1242 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1243 u16 ctl, adv;
1244
1245 /* Setup standard advertise */
1246 adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
1247 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1248 if (advertise & ADVERTISED_10baseT_Half)
1249 adv |= ADVERTISE_10HALF;
1250 if (advertise & ADVERTISED_10baseT_Full)
1251 adv |= ADVERTISE_10FULL;
1252 if (advertise & ADVERTISED_100baseT_Half)
1253 adv |= ADVERTISE_100HALF;
1254 if (advertise & ADVERTISED_100baseT_Full)
1255 adv |= ADVERTISE_100FULL;
1256 mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
1257
1258 /* Start/Restart aneg */
1259 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1260 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1261 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1262
1263 return 0;
1264}
1265
1266static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
1267{
1268 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1269 u16 ctl;
1270
1271 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1272 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
1273
1274 /* First reset the PHY */
1275 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
1276
1277 /* Select speed & duplex */
1278 switch (speed) {
1279 case SPEED_10:
1280 break;
1281 case SPEED_100:
1282 ctl |= BMCR_SPEED100;
1283 break;
1284 case SPEED_1000:
1285 default:
1286 return -EINVAL;
1287 }
1288 if (fd == DUPLEX_FULL)
1289 ctl |= BMCR_FULLDPLX;
1290 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1291
1292 return 0;
1293}
1294
1295
1296static void
1297au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
1298{
1299 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1300 u32 advertise;
1301 int autoneg;
1302 int forced_speed;
1303 int forced_duplex;
1304
1305 /* Default advertise */
1306 advertise = GENMII_DEFAULT_ADVERTISE;
1307 autoneg = aup->want_autoneg;
1308 forced_speed = SPEED_100;
1309 forced_duplex = DUPLEX_FULL;
1310
1311 /* Setup link parameters */
1312 if (cmd) {
1313 if (cmd->autoneg == AUTONEG_ENABLE) {
1314 advertise = cmd->advertising;
1315 autoneg = 1;
1316 } else {
1317 autoneg = 0;
1318
1319 forced_speed = cmd->speed;
1320 forced_duplex = cmd->duplex;
1321 }
1322 }
1323
1324 /* Configure PHY & start aneg */
1325 aup->want_autoneg = autoneg;
1326 if (autoneg)
1327 au1000_setup_aneg(dev, advertise);
1328 else
1329 au1000_setup_forced(dev, forced_speed, forced_duplex);
1330 mod_timer(&aup->timer, jiffies + HZ);
1331}
1332
1333static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1334{
1335 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1336 u16 link, speed;
1337
1338 cmd->supported = GENMII_DEFAULT_FEATURES;
1339 cmd->advertising = GENMII_DEFAULT_ADVERTISE;
1340 cmd->port = PORT_MII;
1341 cmd->transceiver = XCVR_EXTERNAL;
1342 cmd->phy_address = aup->phy_addr;
1343 spin_lock_irq(&aup->lock);
1344 cmd->autoneg = aup->want_autoneg;
1345 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1346 if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
1347 cmd->speed = SPEED_100;
1348 else if (speed == IF_PORT_10BASET)
1349 cmd->speed = SPEED_10;
1350 if (link && (dev->if_port == IF_PORT_100BASEFX))
1351 cmd->duplex = DUPLEX_FULL;
1352 else
1353 cmd->duplex = DUPLEX_HALF;
1354 spin_unlock_irq(&aup->lock);
1355 return 0;
1356}
1357
1358static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1359{
1360 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1361 unsigned long features = GENMII_DEFAULT_FEATURES;
1362
1363 if (!capable(CAP_NET_ADMIN))
1364 return -EPERM;
1365
1366 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1367 return -EINVAL;
1368 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1369 return -EINVAL;
1370 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1371 return -EINVAL;
1372 if (cmd->autoneg == AUTONEG_DISABLE)
1373 switch (cmd->speed) {
1374 case SPEED_10:
1375 if (cmd->duplex == DUPLEX_HALF &&
1376 (features & SUPPORTED_10baseT_Half) == 0)
1377 return -EINVAL;
1378 if (cmd->duplex == DUPLEX_FULL &&
1379 (features & SUPPORTED_10baseT_Full) == 0)
1380 return -EINVAL;
1381 break;
1382 case SPEED_100:
1383 if (cmd->duplex == DUPLEX_HALF &&
1384 (features & SUPPORTED_100baseT_Half) == 0)
1385 return -EINVAL;
1386 if (cmd->duplex == DUPLEX_FULL &&
1387 (features & SUPPORTED_100baseT_Full) == 0)
1388 return -EINVAL;
1389 break;
1390 default:
1391 return -EINVAL;
1392 }
1393 else if ((features & SUPPORTED_Autoneg) == 0)
1394 return -EINVAL;
1395
1396 spin_lock_irq(&aup->lock);
1397 au1000_start_link(dev, cmd);
1398 spin_unlock_irq(&aup->lock);
1399 return 0;
1400}
1401
1402static int au1000_nway_reset(struct net_device *dev)
1403{
1404 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1405
1406 if (!aup->want_autoneg)
1407 return -EINVAL;
1408 spin_lock_irq(&aup->lock);
1409 au1000_start_link(dev, NULL);
1410 spin_unlock_irq(&aup->lock);
1411 return 0;
1412}
1413
1414static void
1415au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1416{
1417 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1418
1419 strcpy(info->driver, DRV_NAME);
1420 strcpy(info->version, DRV_VERSION);
1421 info->fw_version[0] = '\0';
1422 sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
1423 info->regdump_len = 0;
1424}
1425
1426static u32 au1000_get_link(struct net_device *dev)
1427{
1428 return netif_carrier_ok(dev);
1429}
1430
1431static struct ethtool_ops au1000_ethtool_ops = {
1432 .get_settings = au1000_get_settings,
1433 .set_settings = au1000_set_settings,
1434 .get_drvinfo = au1000_get_drvinfo,
1435 .nway_reset = au1000_nway_reset,
1436 .get_link = au1000_get_link
1437};
1438
1439static struct net_device *
1440au1000_probe(u32 ioaddr, int irq, int port_num)
1441{
1442 static unsigned version_printed = 0;
1443 struct au1000_private *aup = NULL;
1444 struct net_device *dev = NULL;
1445 db_dest_t *pDB, *pDBfree;
1446 char *pmac, *argptr;
1447 char ethaddr[6];
1448 int i, err;
1449
1450 if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
1451 return NULL;
1452
1453 if (version_printed++ == 0)
1454 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1455
1456 dev = alloc_etherdev(sizeof(struct au1000_private));
1457 if (!dev) {
1458 printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
1459 return NULL;
1460 }
1461
1462 if ((err = register_netdev(dev))) {
1463 printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
1464 err);
1465 free_netdev(dev);
1466 return NULL;
1467 }
1468
1469 printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
1470 dev->name, ioaddr, irq);
1471
1472 aup = dev->priv;
1473
1474 /* Allocate the data buffers */
1475 /* Snooping works fine with eth on all au1xxx */
1476 aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
1477 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1478 &aup->dma_addr,
1479 0);
1480 if (!aup->vaddr) {
1481 free_netdev(dev);
1482 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1483 return NULL;
1484 }
1485
1486 /* aup->mac is the base address of the MAC's registers */
1487 aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
1488 /* Setup some variables for quick register address access */
1489 if (ioaddr == iflist[0].base_addr)
1490 {
1491 /* check env variables first */
1492 if (!get_ethernet_addr(ethaddr)) {
1493 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1494 } else {
1495 /* Check command line */
1496 argptr = prom_getcmdline();
1497 if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
1498 printk(KERN_INFO "%s: No mac address found\n",
1499 dev->name);
1500 /* use the hard coded mac addresses */
1501 } else {
1502 str2eaddr(ethaddr, pmac + strlen("ethaddr="));
1503 memcpy(au1000_mac_addr, ethaddr,
1504 sizeof(au1000_mac_addr));
1505 }
1506 }
1507 aup->enable = (volatile u32 *)
1508 ((unsigned long)iflist[0].macen_addr);
1509 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1510 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1511 aup->mac_id = 0;
1512 au_macs[0] = aup;
1513 }
1514 else
1515 if (ioaddr == iflist[1].base_addr)
1516 {
1517 aup->enable = (volatile u32 *)
1518 ((unsigned long)iflist[1].macen_addr);
1519 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1520 dev->dev_addr[4] += 0x10;
1521 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1522 aup->mac_id = 1;
1523 au_macs[1] = aup;
1524 }
1525 else
1526 {
1527 printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
1528 }
1529
1530 /* bring the device out of reset, otherwise probing the mii
1531 * will hang */
1532 *aup->enable = MAC_EN_CLOCK_ENABLE;
1533 au_sync_delay(2);
1534 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1535 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1536 au_sync_delay(2);
1537
1538 aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
1539 if (!aup->mii) {
1540 printk(KERN_ERR "%s: out of memory\n", dev->name);
1541 goto err_out;
1542 }
1543 aup->mii->next = NULL;
1544 aup->mii->chip_info = NULL;
1545 aup->mii->status = 0;
1546 aup->mii->mii_control_reg = 0;
1547 aup->mii->mii_data_reg = 0;
1548
1549 if (mii_probe(dev) != 0) {
1550 goto err_out;
1551 }
1552
1553 pDBfree = NULL;
1554 /* setup the data buffer descriptors and attach a buffer to each one */
1555 pDB = aup->db;
1556 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1557 pDB->pnext = pDBfree;
1558 pDBfree = pDB;
1559 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1560 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1561 pDB++;
1562 }
1563 aup->pDBfree = pDBfree;
1564
1565 for (i = 0; i < NUM_RX_DMA; i++) {
1566 pDB = GetFreeDB(aup);
1567 if (!pDB) {
1568 goto err_out;
1569 }
1570 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1571 aup->rx_db_inuse[i] = pDB;
1572 }
1573 for (i = 0; i < NUM_TX_DMA; i++) {
1574 pDB = GetFreeDB(aup);
1575 if (!pDB) {
1576 goto err_out;
1577 }
1578 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1579 aup->tx_dma_ring[i]->len = 0;
1580 aup->tx_db_inuse[i] = pDB;
1581 }
1582
1583 spin_lock_init(&aup->lock);
1584 dev->base_addr = ioaddr;
1585 dev->irq = irq;
1586 dev->open = au1000_open;
1587 dev->hard_start_xmit = au1000_tx;
1588 dev->stop = au1000_close;
1589 dev->get_stats = au1000_get_stats;
1590 dev->set_multicast_list = &set_rx_mode;
1591 dev->do_ioctl = &au1000_ioctl;
1592 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1593 dev->set_config = &au1000_set_config;
1594 dev->tx_timeout = au1000_tx_timeout;
1595 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1596
1597 /*
1598 * The boot code uses the ethernet controller, so reset it to start
1599 * fresh. au1000_init() expects that the device is in reset state.
1600 */
1601 reset_mac(dev);
1602
1603 return dev;
1604
1605err_out:
1606 /* here we should have a valid dev plus aup-> register addresses
1607 * so we can reset the mac properly.*/
1608 reset_mac(dev);
1609 if (aup->mii)
1610 kfree(aup->mii);
1611 for (i = 0; i < NUM_RX_DMA; i++) {
1612 if (aup->rx_db_inuse[i])
1613 ReleaseDB(aup, aup->rx_db_inuse[i]);
1614 }
1615 for (i = 0; i < NUM_TX_DMA; i++) {
1616 if (aup->tx_db_inuse[i])
1617 ReleaseDB(aup, aup->tx_db_inuse[i]);
1618 }
1619 dma_free_noncoherent(NULL,
1620 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1621 (void *)aup->vaddr,
1622 aup->dma_addr);
1623 unregister_netdev(dev);
1624 free_netdev(dev);
1625 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1626 return NULL;
1627}
1628
1629/*
1630 * Initialize the interface.
1631 *
1632 * When the device powers up, the clocks are disabled and the
1633 * mac is in reset state. When the interface is closed, we
1634 * do the same -- reset the device and disable the clocks to
1635 * conserve power. Thus, whenever au1000_init() is called,
1636 * the device should already be in reset state.
1637 */
1638static int au1000_init(struct net_device *dev)
1639{
1640 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1641 u32 flags;
1642 int i;
1643 u32 control;
1644 u16 link, speed;
1645
1646 if (au1000_debug > 4)
1647 printk("%s: au1000_init\n", dev->name);
1648
1649 spin_lock_irqsave(&aup->lock, flags);
1650
1651 /* bring the device out of reset */
1652 *aup->enable = MAC_EN_CLOCK_ENABLE;
1653 au_sync_delay(2);
1654 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1655 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1656 au_sync_delay(20);
1657
1658 aup->mac->control = 0;
1659 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
1660 aup->tx_tail = aup->tx_head;
1661 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
1662
1663 aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
1664 aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
1665 dev->dev_addr[1]<<8 | dev->dev_addr[0];
1666
1667 for (i = 0; i < NUM_RX_DMA; i++) {
1668 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
1669 }
1670 au_sync();
1671
1672 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1673 control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
1674#ifndef CONFIG_CPU_LITTLE_ENDIAN
1675 control |= MAC_BIG_ENDIAN;
1676#endif
1677 if (link && (dev->if_port == IF_PORT_100BASEFX)) {
1678 control |= MAC_FULL_DUPLEX;
1679 }
1680
1681 aup->mac->control = control;
1682 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1683 au_sync();
1684
1685 spin_unlock_irqrestore(&aup->lock, flags);
1686 return 0;
1687}
1688
1689static void au1000_timer(unsigned long data)
1690{
1691 struct net_device *dev = (struct net_device *)data;
1692 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1693 unsigned char if_port;
1694 u16 link, speed;
1695
1696 if (!dev) {
1697 /* fatal error, don't restart the timer */
1698 printk(KERN_ERR "au1000_timer error: NULL dev\n");
1699 return;
1700 }
1701
1702 if_port = dev->if_port;
1703 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1704 if (link) {
 1705 if (!netif_carrier_ok(dev)) {
 1706 netif_carrier_on(dev);
1707 printk(KERN_INFO "%s: link up\n", dev->name);
1708 }
1709 }
1710 else {
 1711 if (netif_carrier_ok(dev)) {
 1712 netif_carrier_off(dev);
1713 dev->if_port = 0;
1714 printk(KERN_INFO "%s: link down\n", dev->name);
1715 }
1716 }
1717 }
1718
1719 if (link && (dev->if_port != if_port) &&
1720 (dev->if_port != IF_PORT_UNKNOWN)) {
1721 hard_stop(dev);
1722 if (dev->if_port == IF_PORT_100BASEFX) {
1723 printk(KERN_INFO "%s: going to full duplex\n",
1724 dev->name);
1725 aup->mac->control |= MAC_FULL_DUPLEX;
1726 au_sync_delay(1);
1727 }
1728 else {
1729 aup->mac->control &= ~MAC_FULL_DUPLEX;
1730 au_sync_delay(1);
1731 }
1732 enable_rx_tx(dev);
1733 }
1734
1735 aup->timer.expires = RUN_AT((1*HZ));
1736 aup->timer.data = (unsigned long)dev;
1737 aup->timer.function = &au1000_timer; /* timer handler */
1738 add_timer(&aup->timer);
1739
1740}
1741
1742static int au1000_open(struct net_device *dev)
1743{
1744 int retval;
1745 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1746
1747 if (au1000_debug > 4)
1748 printk("%s: open: dev=%p\n", dev->name, dev);
1749
1750 if ((retval = au1000_init(dev))) {
1751 printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
1752 free_irq(dev->irq, dev);
1753 return retval;
1754 }
1755 netif_start_queue(dev);
1756
1757 if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
1758 dev->name, dev))) {
1759 printk(KERN_ERR "%s: unable to get IRQ %d\n",
1760 dev->name, dev->irq);
1761 return retval;
1762 }
1763
1764 init_timer(&aup->timer); /* used in ioctl() */
1765 aup->timer.expires = RUN_AT((3*HZ));
1766 aup->timer.data = (unsigned long)dev;
1767 aup->timer.function = &au1000_timer; /* timer handler */
1768 add_timer(&aup->timer);
1769
1770 if (au1000_debug > 4)
1771 printk("%s: open: Initialization done.\n", dev->name);
1772
1773 return 0;
1774}
1775
1776static int au1000_close(struct net_device *dev)
1777{
1778 u32 flags;
1779 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1780
1781 if (au1000_debug > 4)
1782 printk("%s: close: dev=%p\n", dev->name, dev);
1783
1784 reset_mac(dev);
1785
1786 spin_lock_irqsave(&aup->lock, flags);
1787
1788 /* stop the device */
1789 netif_stop_queue(dev);
1790
1791 /* disable the interrupt */
1792 free_irq(dev->irq, dev);
1793 spin_unlock_irqrestore(&aup->lock, flags);
1794
1795 return 0;
1796}
1797
1798static void __exit au1000_cleanup_module(void)
1799{
1800 int i, j;
1801 struct net_device *dev;
1802 struct au1000_private *aup;
1803
1804 for (i = 0; i < num_ifs; i++) {
1805 dev = iflist[i].dev;
1806 if (dev) {
1807 aup = (struct au1000_private *) dev->priv;
1808 unregister_netdev(dev);
1809 if (aup->mii)
1810 kfree(aup->mii);
1811 for (j = 0; j < NUM_RX_DMA; j++) {
1812 if (aup->rx_db_inuse[j])
1813 ReleaseDB(aup, aup->rx_db_inuse[j]);
1814 }
1815 for (j = 0; j < NUM_TX_DMA; j++) {
1816 if (aup->tx_db_inuse[j])
1817 ReleaseDB(aup, aup->tx_db_inuse[j]);
1818 }
1819 dma_free_noncoherent(NULL,
1820 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1821 (void *)aup->vaddr,
1822 aup->dma_addr);
1823 free_netdev(dev);
1824 release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
1825 }
1826 }
1827}
1828
1829
1830static inline void
1831update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
1832{
1833 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1834 struct net_device_stats *ps = &aup->stats;
1835
1836 ps->tx_packets++;
1837 ps->tx_bytes += pkt_len;
1838
1839 if (status & TX_FRAME_ABORTED) {
1840 if (dev->if_port == IF_PORT_100BASEFX) {
1841 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1842 /* any other tx errors are only valid
1843 * in half duplex mode */
1844 ps->tx_errors++;
1845 ps->tx_aborted_errors++;
1846 }
1847 }
1848 else {
1849 ps->tx_errors++;
1850 ps->tx_aborted_errors++;
1851 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1852 ps->tx_carrier_errors++;
1853 }
1854 }
1855}
1856
1857
1858/*
1859 * Called from the interrupt service routine to acknowledge
1860 * the TX DONE bits. This is a must if the irq is setup as
1861 * edge triggered.
1862 */
1863static void au1000_tx_ack(struct net_device *dev)
1864{
1865 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1866 volatile tx_dma_t *ptxd;
1867
1868 ptxd = aup->tx_dma_ring[aup->tx_tail];
1869
1870 while (ptxd->buff_stat & TX_T_DONE) {
1871 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
1872 ptxd->buff_stat &= ~TX_T_DONE;
1873 ptxd->len = 0;
1874 au_sync();
1875
1876 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1877 ptxd = aup->tx_dma_ring[aup->tx_tail];
1878
1879 if (aup->tx_full) {
1880 aup->tx_full = 0;
1881 netif_wake_queue(dev);
1882 }
1883 }
1884}
1885
1886
1887/*
1888 * Au1000 transmit routine.
1889 */
1890static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1891{
1892 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1893 volatile tx_dma_t *ptxd;
1894 u32 buff_stat;
1895 db_dest_t *pDB;
1896 int i;
1897
1898 if (au1000_debug > 5)
1899 printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
1900 dev->name, (unsigned)aup, skb->len,
1901 skb->data, aup->tx_head);
1902
1903 ptxd = aup->tx_dma_ring[aup->tx_head];
1904 buff_stat = ptxd->buff_stat;
1905 if (buff_stat & TX_DMA_ENABLE) {
1906 /* We've wrapped around and the transmitter is still busy */
1907 netif_stop_queue(dev);
1908 aup->tx_full = 1;
1909 return 1;
1910 }
1911 else if (buff_stat & TX_T_DONE) {
1912 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
1913 ptxd->len = 0;
1914 }
1915
1916 if (aup->tx_full) {
1917 aup->tx_full = 0;
1918 netif_wake_queue(dev);
1919 }
1920
1921 pDB = aup->tx_db_inuse[aup->tx_head];
1922 memcpy((void *)pDB->vaddr, skb->data, skb->len);
1923 if (skb->len < ETH_ZLEN) {
1924 for (i=skb->len; i<ETH_ZLEN; i++) {
1925 ((char *)pDB->vaddr)[i] = 0;
1926 }
1927 ptxd->len = ETH_ZLEN;
1928 }
1929 else
1930 ptxd->len = skb->len;
1931
1932 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1933 au_sync();
1934 dev_kfree_skb(skb);
1935 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1936 dev->trans_start = jiffies;
1937 return 0;
1938}
1939
1940
1941static inline void update_rx_stats(struct net_device *dev, u32 status)
1942{
1943 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1944 struct net_device_stats *ps = &aup->stats;
1945
1946 ps->rx_packets++;
1947 if (status & RX_MCAST_FRAME)
1948 ps->multicast++;
1949
1950 if (status & RX_ERROR) {
1951 ps->rx_errors++;
1952 if (status & RX_MISSED_FRAME)
1953 ps->rx_missed_errors++;
 1954 if (status & (RX_OVERLEN | RX_LEN_ERROR))
1955 ps->rx_length_errors++;
1956 if (status & RX_CRC_ERROR)
1957 ps->rx_crc_errors++;
1958 if (status & RX_COLL)
1959 ps->collisions++;
1960 }
1961 else
1962 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1963
1964}
1965
1966/*
1967 * Au1000 receive routine.
1968 */
1969static int au1000_rx(struct net_device *dev)
1970{
1971 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1972 struct sk_buff *skb;
1973 volatile rx_dma_t *prxd;
1974 u32 buff_stat, status;
1975 db_dest_t *pDB;
1976 u32 frmlen;
1977
1978 if (au1000_debug > 5)
1979 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1980
1981 prxd = aup->rx_dma_ring[aup->rx_head];
1982 buff_stat = prxd->buff_stat;
1983 while (buff_stat & RX_T_DONE) {
1984 status = prxd->status;
1985 pDB = aup->rx_db_inuse[aup->rx_head];
1986 update_rx_stats(dev, status);
1987 if (!(status & RX_ERROR)) {
1988
1989 /* good frame */
1990 frmlen = (status & RX_FRAME_LEN_MASK);
1991 frmlen -= 4; /* Remove FCS */
1992 skb = dev_alloc_skb(frmlen + 2);
1993 if (skb == NULL) {
1994 printk(KERN_ERR
1995 "%s: Memory squeeze, dropping packet.\n",
1996 dev->name);
1997 aup->stats.rx_dropped++;
1998 continue;
1999 }
2000 skb->dev = dev;
2001 skb_reserve(skb, 2); /* 16 byte IP header align */
2002 eth_copy_and_sum(skb,
2003 (unsigned char *)pDB->vaddr, frmlen, 0);
2004 skb_put(skb, frmlen);
2005 skb->protocol = eth_type_trans(skb, dev);
2006 netif_rx(skb); /* pass the packet to upper layers */
2007 }
2008 else {
2009 if (au1000_debug > 4) {
2010 if (status & RX_MISSED_FRAME)
2011 printk("rx miss\n");
2012 if (status & RX_WDOG_TIMER)
2013 printk("rx wdog\n");
2014 if (status & RX_RUNT)
2015 printk("rx runt\n");
2016 if (status & RX_OVERLEN)
2017 printk("rx overlen\n");
2018 if (status & RX_COLL)
2019 printk("rx coll\n");
2020 if (status & RX_MII_ERROR)
2021 printk("rx mii error\n");
2022 if (status & RX_CRC_ERROR)
2023 printk("rx crc error\n");
2024 if (status & RX_LEN_ERROR)
2025 printk("rx len error\n");
2026 if (status & RX_U_CNTRL_FRAME)
2027 printk("rx u control frame\n");
2030 }
2031 }
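		/*
		 * Return the buffer to the MAC: re-arm the descriptor with
		 * its DMA address and RX_DMA_ENABLE, then advance rx_head
		 * (NUM_RX_DMA is a power of two) to the next descriptor.
		 */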
2032 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
2033 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
2034 au_sync();
2035
2036 /* next descriptor */
2037 prxd = aup->rx_dma_ring[aup->rx_head];
2038 buff_stat = prxd->buff_stat;
2039 dev->last_rx = jiffies;
2040 }
2041 return 0;
2042}
2043
2044
2045/*
2046 * Au1000 interrupt service routine.
2047 */
2048static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2049{
2050 struct net_device *dev = (struct net_device *) dev_id;
2051
2052	if (dev == NULL) {
2053		printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
2054		return IRQ_NONE;
2055	}
2056
2057 /* Handle RX interrupts first to minimize chance of overrun */
2058
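	/*
	 * No interrupt cause register is consulted here; completion status
	 * is carried in the DMA descriptors, so the receive and transmit-ack
	 * paths are simply run on every interrupt.
	 */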
2059 au1000_rx(dev);
2060 au1000_tx_ack(dev);
2061 return IRQ_RETVAL(1);
2062}
2063
2064
2065/*
2066 * The Tx ring has been full for longer than the watchdog timeout
2067 * value, so the transmitter is presumed hung.  Reset and restart the MAC.
2068 */
2069static void au1000_tx_timeout(struct net_device *dev)
2070{
2071 printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
2072 reset_mac(dev);
2073 au1000_init(dev);
2074 dev->trans_start = jiffies;
2075 netif_wake_queue(dev);
2076}
2077
2078
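/*
 * Bit-serial CRC-32 over the destination address, using the standard
 * Ethernet polynomial.  set_rx_mode() uses the top six bits of the
 * result (crc >> 26) to pick one of the 64 bins in the MAC's multicast
 * hash filter.
 */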
2079static unsigned const ethernet_polynomial = 0x04c11db7U;
2080static inline u32 ether_crc(int length, unsigned char *data)
2081{
2082 int crc = -1;
2083
2084 while(--length >= 0) {
2085 unsigned char current_octet = *data++;
2086 int bit;
2087 for (bit = 0; bit < 8; bit++, current_octet >>= 1)
2088 crc = (crc << 1) ^
2089 ((crc < 0) ^ (current_octet & 1) ?
2090 ethernet_polynomial : 0);
2091 }
2092 return crc;
2093}
2094
2095static void set_rx_mode(struct net_device *dev)
2096{
2097 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2098
2099 if (au1000_debug > 4)
2100 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
2101
2102 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2103 aup->mac->control |= MAC_PROMISCUOUS;
2104 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2105 } else if ((dev->flags & IFF_ALLMULTI) ||
2106 dev->mc_count > MULTICAST_FILTER_LIMIT) {
2107 aup->mac->control |= MAC_PASS_ALL_MULTI;
2108 aup->mac->control &= ~MAC_PROMISCUOUS;
2109 printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
2110 } else {
2111 int i;
2112 struct dev_mc_list *mclist;
2113 u32 mc_filter[2]; /* Multicast hash filter */
2114
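		/*
		 * Each multicast address hashes to one bit in a 64-bit
		 * filter: the top six bits of the CRC select the bit, and
		 * the two 32-bit halves are programmed into the MAC's
		 * multi_hash_high/low registers below.
		 */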
2115 mc_filter[1] = mc_filter[0] = 0;
2116 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2117 i++, mclist = mclist->next) {
2118 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
2119 (long *)mc_filter);
2120 }
2121 aup->mac->multi_hash_high = mc_filter[1];
2122 aup->mac->multi_hash_low = mc_filter[0];
2123 aup->mac->control &= ~MAC_PROMISCUOUS;
2124 aup->mac->control |= MAC_HASH_MODE;
2125 }
2126}
2127
2128
2129static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2130{
2131 struct au1000_private *aup = (struct au1000_private *)dev->priv;
2132 u16 *data = (u16 *)&rq->ifr_ifru;
2133
2134 switch(cmd) {
2135 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2136 case SIOCGMIIPHY:
2137 if (!netif_running(dev)) return -EINVAL;
2138 data[0] = aup->phy_addr;
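		/* fall through: also read the requested MII register */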
2139 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
2140 case SIOCGMIIREG:
2141 data[3] = mdio_read(dev, data[0], data[1]);
2142 return 0;
2143 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
2144 case SIOCSMIIREG:
2145 if (!capable(CAP_NET_ADMIN))
2146 return -EPERM;
2147		mdio_write(dev, data[0], data[1], data[2]);
2148 return 0;
2149 default:
2150 return -EOPNOTSUPP;
2151 }
2152
2153}
2154
2155
2156static int au1000_set_config(struct net_device *dev, struct ifmap *map)
2157{
2158 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2159 u16 control;
2160
2161 if (au1000_debug > 4) {
2162 printk("%s: set_config called: dev->if_port %d map->port %x\n",
2163 dev->name, dev->if_port, map->port);
2164 }
2165
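	/*
	 * Each supported media type is selected by rewriting the PHY's MII
	 * control register.  The carrier is forced down first; the link
	 * timer then detects the renegotiated or forced link state.
	 */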
2166 switch(map->port){
2167 case IF_PORT_UNKNOWN: /* use auto here */
2168 printk(KERN_INFO "%s: config phy for aneg\n",
2169 dev->name);
2170 dev->if_port = map->port;
2171 /* Link Down: the timer will bring it up */
2172 netif_carrier_off(dev);
2173
2174 /* read current control */
2175 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2176 control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
2177
2178 /* enable auto negotiation and reset the negotiation */
2179 mdio_write(dev, aup->phy_addr, MII_CONTROL,
2180 control | MII_CNTL_AUTO |
2181 MII_CNTL_RST_AUTO);
2182
2183 break;
2184
2185 case IF_PORT_10BASET: /* 10BaseT */
2186 printk(KERN_INFO "%s: config phy for 10BaseT\n",
2187 dev->name);
2188 dev->if_port = map->port;
2189
2190 /* Link Down: the timer will bring it up */
2191 netif_carrier_off(dev);
2192
2193 /* set Speed to 10Mbps, Half Duplex */
2194 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2195 control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
2196 MII_CNTL_FDX);
2197
2198 /* disable auto negotiation and force 10M/HD mode*/
2199 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2200 break;
2201
2202 case IF_PORT_100BASET: /* 100BaseT */
2203 case IF_PORT_100BASETX: /* 100BaseTx */
2204 printk(KERN_INFO "%s: config phy for 100BaseTX\n",
2205 dev->name);
2206 dev->if_port = map->port;
2207
2208 /* Link Down: the timer will bring it up */
2209 netif_carrier_off(dev);
2210
2211 /* set Speed to 100Mbps, Half Duplex */
2212 /* disable auto negotiation and enable 100MBit Mode */
2213 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2214 control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
2215 control |= MII_CNTL_F100;
2216 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2217 break;
2218
2219 case IF_PORT_100BASEFX: /* 100BaseFx */
2220 printk(KERN_INFO "%s: config phy for 100BaseFX\n",
2221 dev->name);
2222 dev->if_port = map->port;
2223
2224 /* Link Down: the timer will bring it up */
2225 netif_carrier_off(dev);
2226
2227 /* set Speed to 100Mbps, Full Duplex */
2228 /* disable auto negotiation and enable 100MBit Mode */
2229 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2230 control &= ~MII_CNTL_AUTO;
2231 control |= MII_CNTL_F100 | MII_CNTL_FDX;
2232 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2233 break;
2234 case IF_PORT_10BASE2: /* 10Base2 */
2235 case IF_PORT_AUI: /* AUI */
2236		/* These media types are not supported */
2237		printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
2238			dev->name);
2239		return -EOPNOTSUPP;
2241
2242 default:
2243		printk(KERN_ERR "%s: Invalid media selected\n",
2244			dev->name);
2245 return -EINVAL;
2246 }
2247 return 0;
2248}
2249
2250static struct net_device_stats *au1000_get_stats(struct net_device *dev)
2251{
2252 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2253
2254 if (au1000_debug > 4)
2255 printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
2256
2257 if (netif_device_present(dev)) {
2258 return &aup->stats;
2259 }
2260	return NULL;
2261}
2262
2263module_init(au1000_init_module);
2264module_exit(au1000_cleanup_module);