drivers/net/lp486e.c
1/* Intel Professional Workstation/panther ethernet driver */
2/* lp486e.c: A panther 82596 ethernet driver for linux. */
3/*
4 History and copyrights:
5
6 Driver skeleton
7 Written 1993 by Donald Becker.
8 Copyright 1993 United States Government as represented by the Director,
9 National Security Agency. This software may only be used and
10 distributed according to the terms of the GNU General Public License
11 as modified by SRC, incorporated herein by reference.
12
13 The author may be reached as becker@scyld.com, or C/O
14 Scyld Computing Corporation
15 410 Severn Ave., Suite 210
16 Annapolis MD 21403
17
18 Apricot
19 Written 1994 by Mark Evans.
20 This driver is for the Apricot 82596 bus-master interface
21
22 Modularised 12/94 Mark Evans
23
24 Professional Workstation
25 Derived from apricot.c by Ard van Breemen
26 <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
27
28 Credits:
29 Thanks to Murphy Software BV for letting me write this in their time.
30 Well, actually, I get paid doing this...
31 (Also: see http://www.murphy.nl for Murphy, and my homepage ~ard for
32 more information on the Professional Workstation)
33
34 Present version
35 aeb@cwi.nl
36*/
37/*
38 There are currently two motherboards that I know of in the
39 Professional Workstation. The only one that I am familiar with is the
40 Intel Panther motherboard. -- ard
41*/
42/*
43The PWS is equipped with an Intel 82596. This is a very intelligent controller
44which runs its own microcode. Communication with the host processor is done
45through linked lists of commands and buffers in the host processor's memory.
46A complete description of the 82596 is available from Intel. Search for
47a file called "29021806.pdf"; it is a complete description of the chip itself.
48To use it for the PWS some additions are needed regarding generation of
49the PORT and CA signals, and the interrupt glue needed for a PC.
50I/O map:
51PORT SIZE ACTION MEANING
520xCB0 2 WRITE Lower 16 bits for PORT command
530xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
540xCB4 1 WRITE Generation of CA signal
550xCB8 1 WRITE Clear interrupt glue
56All other communication is through memory!
57*/
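
#if 0
/*
 * Sketch only, never compiled: the raw port accesses implied by the I/O
 * map above, assuming the fixed 0xCB0 register block.  The driver's real
 * helpers are PORT(), CA() and CLEAR_INT(), defined further down, and the
 * actual bring-up lives in i596_scp_setup(); "scp_addr" here stands for
 * the 16-byte aligned bus address of the System Configuration Pointer.
 */
static inline void lp486e_port_sketch(u32 scp_addr)
{
	outw(0x0000, 0xCB0);		/* lower half of a PORT command: RESET */
	outw(0x0000, 0xCB2);		/* upper half; this write issues it */
	udelay(100);			/* give the chip time to reset */

	outw((scp_addr & 0xffff) | 0x02, 0xCB0);	/* ALTSCP, lower half */
	outw(scp_addr >> 16, 0xCB2);			/* upper half issues it */

	outb(0, 0xCB4);		/* Channel Attention: go read the new SCP */
	outb(0, 0xCB8);		/* clear the interrupt glue (as the IRQ handler does) */
}
#endif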
58
59#include <linux/module.h>
60#include <linux/init.h>
61#include <linux/delay.h>
62#include <linux/kernel.h>
63#include <linux/string.h>
64#include <linux/errno.h>
65#include <linux/ioport.h>
66#include <linux/slab.h>
67#include <linux/interrupt.h>
68#include <linux/netdevice.h>
69#include <linux/etherdevice.h>
70#include <linux/skbuff.h>
71#include <linux/bitops.h>
72
73#include <asm/io.h>
74#include <asm/dma.h>
75
76#define DRV_NAME "lp486e"
77
78/* debug print flags */
79#define LOG_SRCDST 0x80000000
80#define LOG_STATINT 0x40000000
81#define LOG_STARTINT 0x20000000
82
83#define i596_debug debug
84
85static int i596_debug = 0;
86
87static const char * const medianame[] = {
88 "10baseT", "AUI",
89 "10baseT-FD", "AUI-FD",
90};
91
92#define LP486E_TOTAL_SIZE 16
93
94#define I596_NULL (0xffffffff)
95
96#define CMD_EOL 0x8000 /* The last command of the list, stop. */
97#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
98#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
99
100#define CMD_FLEX 0x0008 /* Enable flexible memory model */
101
102enum commands {
103 CmdNOP = 0,
104 CmdIASetup = 1,
105 CmdConfigure = 2,
106 CmdMulticastList = 3,
107 CmdTx = 4,
108 CmdTDR = 5,
109 CmdDump = 6,
110 CmdDiagnose = 7
111};
112
113#if 0
114static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
115 "Tx", "TDR", "Dump", "Diagnose" };
116#endif
117
118/* Status word bits */
119#define STAT_CX 0x8000 /* The CU finished executing a command
120 with the Interrupt bit set */
121#define STAT_FR 0x4000 /* The RU finished receiving a frame */
122#define STAT_CNA 0x2000 /* The CU left the active state */
123#define STAT_RNR 0x1000 /* The RU left the active state */
124#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
125#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
126 2: active, 3-7: unused */
127#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
128 2: no resources, 4: ready,
129 10: no resources due to no more RBDs,
130 12: no more RBDs, other: unused */
131#define STAT_T 0x0008 /* Bus throttle timers loaded */
132#define STAT_ZERO 0x0807 /* Always zero */
133
134#if 0
135static char *CUstates[8] = {
136 "idle", "suspended", "active", 0, 0, 0, 0, 0
137};
138static char *RUstates[16] = {
139 "idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
140 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
141};
142
143static void
144i596_out_status(int status) {
145 int bad = 0;
146 char *s;
147
148 printk("status %4.4x:", status);
149 if (status == 0xffff)
150 printk(" strange..\n");
151 else {
152 if (status & STAT_CX)
153 printk(" CU done");
154 if (status & STAT_CNA)
155 printk(" CU stopped");
156 if (status & STAT_FR)
157 printk(" got a frame");
158 if (status & STAT_RNR)
159 printk(" RU stopped");
160 if (status & STAT_T)
161 printk(" throttled");
162 if (status & STAT_ZERO)
163 bad = 1;
164 s = CUstates[(status & STAT_CUS) >> 8];
165 if (!s)
166 bad = 1;
167 else
168 printk(" CU(%s)", s);
169 s = RUstates[(status & STAT_RUS) >> 4];
170 if (!s)
171 bad = 1;
172 else
173 printk(" RU(%s)", s);
174 if (bad)
175 printk(" bad status");
176 printk("\n");
177 }
178}
179#endif
180
181/* Command word bits */
182#define ACK_CX 0x8000
183#define ACK_FR 0x4000
184#define ACK_CNA 0x2000
185#define ACK_RNR 0x1000
186
187#define CUC_START 0x0100
188#define CUC_RESUME 0x0200
189#define CUC_SUSPEND 0x0300
190#define CUC_ABORT 0x0400
191
192#define RX_START 0x0010
193#define RX_RESUME 0x0020
194#define RX_SUSPEND 0x0030
195#define RX_ABORT 0x0040
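
/*
 * The SCB command word written by the driver combines acknowledge bits with
 * control codes: i596_interrupt() below echoes the STAT_ACK bits it observed
 * and ORs in CUC_START / RX_START when the command or receive unit needs
 * (re)starting, then pulses CA().
 */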
196
197typedef u32 phys_addr;
198
199static inline phys_addr
200va_to_pa(void *x) {
201 return x ? virt_to_bus(x) : I596_NULL;
202}
203
204static inline void *
205pa_to_va(phys_addr x) {
206 return (x == I596_NULL) ? NULL : bus_to_virt(x);
207}
208
209/* status bits for cmd */
210#define CMD_STAT_C 0x8000 /* CU command complete */
211#define CMD_STAT_B 0x4000 /* CU command in progress */
212#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
213#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
214
215struct i596_cmd { /* 8 bytes */
216 unsigned short status;
217 unsigned short command;
218 phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
219};
220
221#define EOF 0x8000
222#define SIZE_MASK 0x3fff
223
224struct i596_tbd {
225 unsigned short size;
226 unsigned short pad;
227 phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
228 phys_addr pa_data; /* va_to_pa(char *data) */
229 struct sk_buff *skb;
230};
231
232struct tx_cmd {
233 struct i596_cmd cmd;
234 phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
235 unsigned short size;
236 unsigned short pad;
237};
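
/*
 * A transmit is queued as a tx_cmd immediately followed in memory by its
 * i596_tbd - i596_start_xmit() below kmallocs both in one block - giving:
 *
 *   tx_cmd.cmd.pa_next -> next queued command (or I596_NULL)
 *   tx_cmd.pa_tbd ------> i596_tbd, whose pa_data points at skb->data
 *
 * All links are bus addresses (va_to_pa) because the bus-mastering 82596
 * follows them itself.
 */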
238
239/* status bits for rfd */
240#define RFD_STAT_C 0x8000 /* Frame reception complete */
241#define RFD_STAT_B 0x4000 /* Frame reception in progress */
242#define RFD_STAT_OK 0x2000 /* Frame received without errors */
243#define RFD_STATUS 0x1fff
244#define RFD_LENGTH_ERR 0x1000
245#define RFD_CRC_ERR 0x0800
246#define RFD_ALIGN_ERR 0x0400
247#define RFD_NOBUFS_ERR 0x0200
248#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
249#define RFD_SHORT_FRAME_ERR 0x0080
250#define RFD_NOEOP_ERR 0x0040
251#define RFD_TRUNC_ERR 0x0020
252#define RFD_MULTICAST 0x0002 /* 0: destination had our address
253 1: destination was broadcast/multicast */
254#define RFD_COLLISION 0x0001
255
256/* receive frame descriptor */
257struct i596_rfd {
258 unsigned short stat;
259 unsigned short cmd;
260 phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
261 phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
262 unsigned short count;
263 unsigned short size;
264 char data[1532];
265};
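
/*
 * As used by this driver, each RFD carries its own 1532-byte data area:
 * init_rx_bufs() below leaves pa_rbd at I596_NULL, so received frames land
 * directly in data[] and the separate receive-buffer-descriptor scheme only
 * exists as #if 0 sketch code.
 */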
266
267#define RBD_EL 0x8000
268#define RBD_P 0x4000
269#define RBD_SIZEMASK 0x3fff
270#define RBD_EOF 0x8000
271#define RBD_F 0x4000
272
273/* receive buffer descriptor */
274struct i596_rbd {
275 unsigned short size;
276 unsigned short pad;
277 phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
278 phys_addr pa_data; /* va_to_pa(char *data) */
279 phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */
280
281 /* Driver private part */
282 struct sk_buff *skb;
283};
284
285#define RX_RING_SIZE 64
286#define RX_SKBSIZE (ETH_FRAME_LEN+10)
287#define RX_RBD_SIZE 32
288
289/* System Control Block - 40 bytes */
290struct i596_scb {
291 u16 status; /* 0 */
292 u16 command; /* 2 */
293 phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
294 phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
295 u32 crc_err; /* 12 */
296 u32 align_err; /* 16 */
297 u32 resource_err; /* 20 */
298 u32 over_err; /* 24 */
299 u32 rcvdt_err; /* 28 */
300 u32 short_err; /* 32 */
301 u16 t_on; /* 36 */
302 u16 t_off; /* 38 */
303};
304
305/* Intermediate System Configuration Pointer - 8 bytes */
306struct i596_iscp {
307 u32 busy; /* 0 */
308 phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
309};
310
311/* System Configuration Pointer - 12 bytes */
312struct i596_scp {
313 u32 sysbus; /* 0 */
314 u32 pad; /* 4 */
315 phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
316};
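
/*
 * Bring-up chain the 82596 walks on the first CA() after a reset (see
 * i596_scp_setup() below):
 *
 *   PORT_ALTSCP address -> scp.pa_iscp -> iscp.pa_scb -> scb
 *
 * after which all further driver/chip traffic goes through the SCB.
 */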
317
318/* Selftest and dump results - needs 16-byte alignment */
319/*
320 * The size of the dump area is 304 bytes. When the dump is executed
321 * by the Port command an extra word will be appended to the dump area.
322 * The extra word is a copy of the Dump status word (containing the
323 * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
324 */
325struct i596_dump {
326 u16 dump[153]; /* (304 = 130h) + 2 bytes */
327};
328
329struct i596_private { /* aligned to a 16-byte boundary */
330 struct i596_scp scp; /* 0 - needs 16-byte alignment */
331 struct i596_iscp iscp; /* 12 */
332 struct i596_scb scb; /* 20 */
333 u32 dummy; /* 60 */
334 struct i596_dump dump; /* 64 - needs 16-byte alignment */
335
336 struct i596_cmd set_add;
337 char eth_addr[8]; /* directly follows set_add */
338
339 struct i596_cmd set_conf;
340 char i596_config[16]; /* directly follows set_conf */
341
342 struct i596_cmd tdr;
343 unsigned long tdr_stat; /* directly follows tdr */
344
345 int last_restart;
346 struct i596_rbd *rbd_list;
347 struct i596_rbd *rbd_tail;
348 struct i596_rfd *rx_tail;
349 struct i596_cmd *cmd_tail;
350 struct i596_cmd *cmd_head;
351 int cmd_backlog;
352 unsigned long last_cmd;
353 struct net_device_stats stats;
354 spinlock_t cmd_lock;
355};
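
/*
 * Layout note: set_add, set_conf and tdr above are bare i596_cmd headers
 * whose parameter areas (eth_addr, i596_config, tdr_stat) must directly
 * follow them in memory - the handlers reach them as (cmd + 1), e.g. the
 * CmdTDR result is read via *((unsigned long *)(cmd + 1)) in
 * i596_handle_CU_completion().
 */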
356
357static char init_setup[14] = {
358 0x8E, /* length 14 bytes, prefetch on */
359 0xC8, /* default: fifo to 8, monitor off */
360 0x40, /* default: don't save bad frames (apricot.c had 0x80) */
361 0x2E, /* (default is 0x26)
362 No source address insertion, 8 byte preamble */
363 0x00, /* default priority and backoff */
364 0x60, /* default interframe spacing */
365 0x00, /* default slot time LSB */
366 0xf2, /* default slot time and nr of retries */
367 0x00, /* default various bits
368 (0: promiscuous mode, 1: broadcast disable,
369 2: encoding mode, 3: transmit on no CRS,
370 4: no CRC insertion, 5: CRC type,
371 6: bit stuffing, 7: padding) */
372 0x00, /* default carrier sense and collision detect */
373 0x40, /* default minimum frame length */
374 0xff, /* (default is 0xff, and that is what apricot.c has;
375 elp486.c has 0xfb: Enable crc append in memory.) */
376 0x00, /* default: not full duplex */
377 0x7f /* (default is 0x3f) multi IA */
378};
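
/*
 * Decoding two of the bytes above: byte 0 (0x8E) carries the block length in
 * its low nibble (0x0E = 14) and prefetch in its top bit, and bit 0 of byte 8
 * is the promiscuous-mode flag that set_multicast_list() toggles via
 * lp->i596_config[8].
 */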
379
380static int i596_open(struct net_device *dev);
381static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
382static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
383static int i596_close(struct net_device *dev);
384static struct net_device_stats *i596_get_stats(struct net_device *dev);
385static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
386static void print_eth(char *);
387static void set_multicast_list(struct net_device *dev);
388static void i596_tx_timeout(struct net_device *dev);
389
390static int
391i596_timeout(struct net_device *dev, char *msg, int ct) {
392 struct i596_private *lp;
393 int boguscnt = ct;
394
395 lp = (struct i596_private *) dev->priv;
396 while (lp->scb.command) {
397 if (--boguscnt == 0) {
398 printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
399 dev->name, msg,
400 lp->scb.status, lp->scb.command);
401 return 1;
402 }
403 udelay(5);
404 barrier();
405 }
406 return 0;
407}
408
409static inline int
410init_rx_bufs(struct net_device *dev, int num) {
411 struct i596_private *lp;
412 struct i596_rfd *rfd;
413 int i;
414 // struct i596_rbd *rbd;
415
416 lp = (struct i596_private *) dev->priv;
417 lp->scb.pa_rfd = I596_NULL;
418
419 for (i = 0; i < num; i++) {
420 rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
421 if (rfd == NULL)
422 break;
423
424 rfd->stat = 0;
425 rfd->pa_rbd = I596_NULL;
426 rfd->count = 0;
427 rfd->size = 1532;
428 if (i == 0) {
429 rfd->cmd = CMD_EOL;
430 lp->rx_tail = rfd;
431 } else {
432 rfd->cmd = 0;
433 }
434 rfd->pa_next = lp->scb.pa_rfd;
435 lp->scb.pa_rfd = va_to_pa(rfd);
436 lp->rx_tail->pa_next = lp->scb.pa_rfd;
437 }
438
439#if 0
440 for (i = 0; i<RX_RBD_SIZE; i++) {
441 rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
442 if (rbd) {
443 rbd->pad = 0;
444 rbd->count = 0;
445 rbd->skb = dev_alloc_skb(RX_SKBSIZE);
446 if (!rbd->skb) {
447 printk("dev_alloc_skb failed");
448 }
449 rbd->next = rfd->rbd;
450 if (i) {
451 rfd->rbd->prev = rbd;
452 rbd->size = RX_SKBSIZE;
453 } else {
454 rbd->size = (RX_SKBSIZE | RBD_EL);
455 lp->rbd_tail = rbd;
456 }
457
458 rfd->rbd = rbd;
459 } else {
460 printk("Could not kmalloc rbd\n");
461 }
462 }
463 lp->rbd_tail->next = rfd->rbd;
464#endif
465 return (i);
466}
467
468static inline void
469remove_rx_bufs(struct net_device *dev) {
470 struct i596_private *lp;
471 struct i596_rfd *rfd;
472
473 lp = (struct i596_private *) dev->priv;
474 lp->rx_tail->pa_next = I596_NULL;
475
476 do {
477 rfd = pa_to_va(lp->scb.pa_rfd);
478 lp->scb.pa_rfd = rfd->pa_next;
479 kfree(rfd);
480 } while (rfd != lp->rx_tail);
481
482 lp->rx_tail = NULL;
483
484#if 0
485 for (lp->rbd_list) {
486 }
487#endif
488}
489
490#define PORT_RESET 0x00 /* reset 82596 */
491#define PORT_SELFTEST 0x01 /* selftest */
492#define PORT_ALTSCP 0x02 /* alternate SCB address */
493#define PORT_DUMP 0x03 /* dump */
494
495#define IOADDR 0xcb0 /* real constant */
496#define IRQ 10 /* default IRQ - can be changed by ECU */
497
498/* The 82596 requires two 16-bit write cycles for a port command */
499static inline void
500PORT(phys_addr a, unsigned int cmd) {
501 if (a & 0xf)
502 printk("lp486e.c: PORT: address not aligned\n");
503 outw(((a & 0xffff) | cmd), IOADDR);
504 outw(((a>>16) & 0xffff), IOADDR+2);
505}
506
507static inline void
508CA(void) {
509 outb(0, IOADDR+4);
510 udelay(8);
511}
512
513static inline void
514CLEAR_INT(void) {
515 outb(0, IOADDR+8);
516}
517
518#define SIZE(x) (sizeof(x)/sizeof((x)[0]))
519
520#if 0
521/* selftest or dump */
522static void
523i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
524 struct i596_private *lp = dev->priv;
525 u16 *outp;
526 int i, m;
527
528 memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
529 outp = &(lp->dump.dump[0]);
530
531 PORT(va_to_pa(outp), portcmd);
532 mdelay(30); /* random, unmotivated */
533
534 printk("lp486e i82596 %s result:\n", cmdname);
535 for (m = SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
536 ;
537 for (i = 0; i < m; i++) {
538 printk(" %04x", lp->dump.dump[i]);
539 if (i%8 == 7)
540 printk("\n");
541 }
542 printk("\n");
543}
544#endif
545
546static int
547i596_scp_setup(struct net_device *dev) {
548 struct i596_private *lp = dev->priv;
549 int boguscnt;
550
551 /* Setup SCP, ISCP, SCB */
552 /*
553 * sysbus bits:
554 * only a single byte is significant - here 0x44
555 * 0x80: big endian mode (details depend on stepping)
556 * 0x40: 1
557 * 0x20: interrupt pin is active low
558 * 0x10: lock function disabled
559 * 0x08: external triggering of bus throttle timers
560 * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
561 * 0x01: unused
562 */
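	/*
	 * So 0x44 = 0x40 (the must-be-one bit) | 0x04 (the 0x06 mode field set
	 * to binary 10, i.e. linear mode).  The SYSBUS byte sits at offset 2 of
	 * the SCP, which is presumably why it is written as 0x00440000 into the
	 * u32 below.
	 */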
563 lp->scp.sysbus = 0x00440000; /* linear mode */
564 lp->scp.pad = 0; /* must be zero */
565 lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
566
567 /*
568 * The CPU sets the ISCP busy word to 1 before it gives the first CA()
569 */
570 lp->iscp.busy = 0x0001;
571 lp->iscp.pa_scb = va_to_pa(&(lp->scb));
572
573 lp->scb.command = 0;
574 lp->scb.status = 0;
575 lp->scb.pa_cmd = I596_NULL;
576 /* lp->scb.pa_rfd has been initialised already */
577
578 lp->last_cmd = jiffies;
579 lp->cmd_backlog = 0;
580 lp->cmd_head = NULL;
581
582 /*
583 * Reset the 82596.
584 * We need to wait 10 system clock cycles, and
585 * 5 serial clock cycles.
586 */
587 PORT(0, PORT_RESET); /* address part ignored */
588 udelay(100);
589
590 /*
591 * Before the CA signal is asserted, the default SCP address
592 * (0x00fffff4) can be changed to a 16-byte aligned value
593 */
594 PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
595
596 /*
597 * The initialization procedure begins when a
598 * Channel Attention signal is asserted after a reset.
599 */
600
601 CA();
602
603 /*
604 * The ISCP busy is cleared by the 82596 after the SCB address is read.
605 */
606 boguscnt = 100;
607 while (lp->iscp.busy) {
608 if (--boguscnt == 0) {
609 /* No i82596 present? */
610 printk("%s: i82596 initialization timed out\n",
611 dev->name);
612 return 1;
613 }
614 udelay(5);
615 barrier();
616 }
617 /* I find here boguscnt==100, so no delay was required. */
618
619 return 0;
620}
621
622static int
623init_i596(struct net_device *dev) {
624 struct i596_private *lp;
625
626 if (i596_scp_setup(dev))
627 return 1;
628
629 lp = (struct i596_private *) dev->priv;
630 lp->scb.command = 0;
631
632 memcpy ((void *)lp->i596_config, init_setup, 14);
633 lp->set_conf.command = CmdConfigure;
634 i596_add_cmd(dev, (void *)&lp->set_conf);
635
636 memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
637 lp->set_add.command = CmdIASetup;
638 i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
639
640 lp->tdr.command = CmdTDR;
641 i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
642
643 if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
644 return 1;
645
646 lp->scb.command = RX_START;
647 CA();
648
649 barrier();
650
651 if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
652 return 1;
653
654 return 0;
655}
656
657/* Receive a single frame */
658static inline int
659i596_rx_one(struct net_device *dev, struct i596_private *lp,
660 struct i596_rfd *rfd, int *frames) {
661
662 if (rfd->stat & RFD_STAT_OK) {
663 /* a good frame */
664 int pkt_len = (rfd->count & 0x3fff);
665 struct sk_buff *skb = dev_alloc_skb(pkt_len);
666
667 (*frames)++;
668
669 if (rfd->cmd & CMD_EOL)
670 printk("Received on EOL\n");
671
672 if (skb == NULL) {
673 printk ("%s: i596_rx Memory squeeze, "
674 "dropping packet.\n", dev->name);
675 lp->stats.rx_dropped++;
676 return 1;
677 }
678
679 skb->dev = dev;
680 memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
681
682 skb->protocol = eth_type_trans(skb,dev);
683 netif_rx(skb);
684 dev->last_rx = jiffies;
685 lp->stats.rx_packets++;
686 } else {
687#if 0
688 printk("Frame reception error status %04x\n",
689 rfd->stat);
690#endif
691 lp->stats.rx_errors++;
692 if (rfd->stat & RFD_COLLISION)
693 lp->stats.collisions++;
694 if (rfd->stat & RFD_SHORT_FRAME_ERR)
695 lp->stats.rx_length_errors++;
696 if (rfd->stat & RFD_DMA_ERR)
697 lp->stats.rx_over_errors++;
698 if (rfd->stat & RFD_NOBUFS_ERR)
699 lp->stats.rx_fifo_errors++;
700 if (rfd->stat & RFD_ALIGN_ERR)
701 lp->stats.rx_frame_errors++;
702 if (rfd->stat & RFD_CRC_ERR)
703 lp->stats.rx_crc_errors++;
704 if (rfd->stat & RFD_LENGTH_ERR)
705 lp->stats.rx_length_errors++;
706 }
707 rfd->stat = rfd->count = 0;
708 return 0;
709}
710
711static int
712i596_rx(struct net_device *dev) {
713 struct i596_private *lp = (struct i596_private *) dev->priv;
714 struct i596_rfd *rfd;
715 int frames = 0;
716
717 while (1) {
718 rfd = pa_to_va(lp->scb.pa_rfd);
719 if (!rfd) {
720 printk(KERN_ERR "i596_rx: NULL rfd?\n");
721 return 0;
722 }
723#if 1
724 if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
725 printk("SF:%p-%04x\n", rfd, rfd->stat);
726#endif
727 if (!(rfd->stat & RFD_STAT_C))
728 break; /* next one not ready */
729 if (i596_rx_one(dev, lp, rfd, &frames))
730 break; /* out of memory */
731 rfd->cmd = CMD_EOL;
732 lp->rx_tail->cmd = 0;
733 lp->rx_tail = rfd;
734 lp->scb.pa_rfd = rfd->pa_next;
735 barrier();
736 }
737
738 return frames;
739}
740
741static void
742i596_cleanup_cmd(struct net_device *dev) {
743 struct i596_private *lp;
744 struct i596_cmd *cmd;
745
746 lp = (struct i596_private *) dev->priv;
747 while (lp->cmd_head) {
748 cmd = (struct i596_cmd *)lp->cmd_head;
749
750 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
751 lp->cmd_backlog--;
752
753 switch ((cmd->command) & 0x7) {
754 case CmdTx: {
755 struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
756 struct i596_tbd * tx_cmd_tbd;
757 tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
758
759 dev_kfree_skb_any(tx_cmd_tbd->skb);
760
761 lp->stats.tx_errors++;
762 lp->stats.tx_aborted_errors++;
763
764 cmd->pa_next = I596_NULL;
765 kfree((unsigned char *)tx_cmd);
766 netif_wake_queue(dev);
767 break;
768 }
769 case CmdMulticastList: {
770 // unsigned short count = *((unsigned short *) (ptr + 1));
771
772 cmd->pa_next = I596_NULL;
773 kfree((unsigned char *)cmd);
774 break;
775 }
776 default: {
777 cmd->pa_next = I596_NULL;
778 break;
779 }
780 }
781 barrier();
782 }
783
784 if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
785 ;
786
787 lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
788}
789
790static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
791
792 if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
793 ;
794
795 netif_stop_queue(dev);
796
797 lp->scb.command = CUC_ABORT | RX_ABORT;
798 CA();
799 barrier();
800
801 /* wait for shutdown */
802 if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
803 ;
804
805 i596_cleanup_cmd(dev);
806 i596_rx(dev);
807
808 netif_start_queue(dev);
809 /*dev_kfree_skb(skb, FREE_WRITE);*/
810 init_i596(dev);
811}
812
813static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
814 struct i596_private *lp = dev->priv;
815 int ioaddr = dev->base_addr;
816 unsigned long flags;
817
818 cmd->status = 0;
819 cmd->command |= (CMD_EOL | CMD_INTR);
820 cmd->pa_next = I596_NULL;
821
822 spin_lock_irqsave(&lp->cmd_lock, flags);
823
824 if (lp->cmd_head) {
825 lp->cmd_tail->pa_next = va_to_pa(cmd);
826 } else {
827 lp->cmd_head = cmd;
828 if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
829 ;
830 lp->scb.pa_cmd = va_to_pa(cmd);
831 lp->scb.command = CUC_START;
832 CA();
833 }
834 lp->cmd_tail = cmd;
835 lp->cmd_backlog++;
836
837 lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
838 spin_unlock_irqrestore(&lp->cmd_lock, flags);
839
840 if (lp->cmd_backlog > 16) {
841 int tickssofar = jiffies - lp->last_cmd;
842 if (tickssofar < HZ/4)
843 return;
844
845 printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
846 i596_reset(dev, lp, ioaddr);
847 }
848}
849
850static int i596_open(struct net_device *dev)
851{
852 int i;
853
854 i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ, dev->name, dev);
855 if (i) {
856 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
857 return i;
858 }
859
860 if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
861 printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
862
863 if (i < 4) {
864 free_irq(dev->irq, dev);
865 return -EAGAIN;
866 }
867 netif_start_queue(dev);
868 init_i596(dev);
869 return 0; /* Always succeed */
870}
871
872static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
873 struct i596_private *lp = dev->priv;
874 struct tx_cmd *tx_cmd;
875 short length;
876
877 length = skb->len;
878
879 if (length < ETH_ZLEN) {
880 skb = skb_padto(skb, ETH_ZLEN);
881 if (skb == NULL)
882 return 0;
883 length = ETH_ZLEN;
884 }
885
886 dev->trans_start = jiffies;
887
888 tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
889 if (tx_cmd == NULL) {
890 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
891 lp->stats.tx_dropped++;
892 dev_kfree_skb (skb);
893 } else {
894 struct i596_tbd *tx_cmd_tbd;
895 tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
896 tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
897 tx_cmd_tbd->pa_next = I596_NULL;
898
899 tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
900
901 tx_cmd->pad = 0;
902 tx_cmd->size = 0;
903 tx_cmd_tbd->pad = 0;
904 tx_cmd_tbd->size = (EOF | length);
905
906 tx_cmd_tbd->pa_data = va_to_pa (skb->data);
907 tx_cmd_tbd->skb = skb;
908
909 if (i596_debug & LOG_SRCDST)
910 print_eth (skb->data);
911
912 i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
913
914 lp->stats.tx_packets++;
915 }
916
917 return 0;
918}
919
920static void
921i596_tx_timeout (struct net_device *dev) {
922 struct i596_private *lp = dev->priv;
923 int ioaddr = dev->base_addr;
924
925 /* Transmitter timeout, serious problems. */
926 printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
927 lp->stats.tx_errors++;
928
929 /* Try to restart the adaptor */
930 if (lp->last_restart == lp->stats.tx_packets) {
931 printk ("Resetting board.\n");
932
933 /* Shutdown and restart */
934 i596_reset (dev, lp, ioaddr);
935 } else {
936 /* Issue a channel attention signal */
937 printk ("Kicking board.\n");
938 lp->scb.command = (CUC_START | RX_START);
939 CA();
940 lp->last_restart = lp->stats.tx_packets;
941 }
942 netif_wake_queue(dev);
943}
944
945static void print_eth(char *add)
946{
947 int i;
948
949 printk ("Dest ");
950 for (i = 0; i < 6; i++)
951 printk(" %2.2X", (unsigned char) add[i]);
952 printk ("\n");
953
954 printk ("Source");
955 for (i = 0; i < 6; i++)
956 printk(" %2.2X", (unsigned char) add[i+6]);
957 printk ("\n");
958
959 printk ("type %2.2X%2.2X\n",
960 (unsigned char) add[12], (unsigned char) add[13]);
961}
962
963static int __init lp486e_probe(struct net_device *dev) {
964 struct i596_private *lp;
965 unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
966 unsigned char *bios;
967 int i, j;
968 int ret = -ENOMEM;
969 static int probed;
970
971 if (probed)
972 return -ENODEV;
973 probed++;
974
975 if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
976 printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
977 return -EBUSY;
978 }
979
980 lp = (struct i596_private *) dev->priv;
981 spin_lock_init(&lp->cmd_lock);
982
983 /*
984 * Do we really have this thing?
985 */
986 if (i596_scp_setup(dev)) {
987 ret = -ENODEV;
988 goto err_out_kfree;
989 }
990
991 dev->base_addr = IOADDR;
992 dev->irq = IRQ;
993
994
995 /*
996 * How do we find the ethernet address? I don't know.
997 * One possibility is to look at the EISA configuration area
998 * [0xe8000-0xe9fff]. This contains the ethernet address
999 * but not at a fixed address - things depend on setup options.
1000 *
1001 * If we find no address, or the wrong address, use
1002 * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
1003 * with the value found in the BIOS setup.
1004 */
1005 bios = bus_to_virt(0xe8000);
1006 for (j = 0; j < 0x2000; j++) {
1007 if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
1008 printk("%s: maybe address at BIOS 0x%x:",
1009 dev->name, 0xe8000+j);
1010 for (i = 0; i < 6; i++) {
1011 eth_addr[i] = bios[i+j];
1012 printk(" %2.2X", eth_addr[i]);
1013 }
1014 printk("\n");
1015 }
1016 }
1017
1018 printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
1019 dev->name, dev->base_addr, dev->irq);
1020 for (i = 0; i < 6; i++)
1021 printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
1022 printk("\n");
1023
1024 /* The LP486E-specific entries in the device structure. */
1025 dev->open = &i596_open;
1026 dev->stop = &i596_close;
1027 dev->hard_start_xmit = &i596_start_xmit;
1028 dev->get_stats = &i596_get_stats;
1029 dev->set_multicast_list = &set_multicast_list;
1030 dev->watchdog_timeo = 5*HZ;
1031 dev->tx_timeout = i596_tx_timeout;
1032
1033#if 0
1034 /* selftest reports 0x320925ae - don't know what that means */
1035 i596_port_do(dev, PORT_SELFTEST, "selftest");
1036 i596_port_do(dev, PORT_DUMP, "dump");
1037#endif
1038 return 0;
1039
1040err_out_kfree:
1041 release_region(IOADDR, LP486E_TOTAL_SIZE);
1042 return ret;
1043}
1044
1045static inline void
1046i596_handle_CU_completion(struct net_device *dev,
1047 struct i596_private *lp,
1048 unsigned short status,
1049 unsigned short *ack_cmdp) {
1050 struct i596_cmd *cmd;
1051 int frames_out = 0;
1052 int commands_done = 0;
1053 int cmd_val;
1054 unsigned long flags;
1055
1056 spin_lock_irqsave(&lp->cmd_lock, flags);
1057 cmd = lp->cmd_head;
1058
1059 while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
1060 cmd = lp->cmd_head;
1061
1062 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
1063 lp->cmd_backlog--;
1064
1065 commands_done++;
1066 cmd_val = cmd->command & 0x7;
1067#if 0
1068 printk("finished CU %s command (%d)\n",
1069 CUcmdnames[cmd_val], cmd_val);
1070#endif
1071 switch (cmd_val) {
1072 case CmdTx:
1073 {
1074 struct tx_cmd *tx_cmd;
1075 struct i596_tbd *tx_cmd_tbd;
1076
1077 tx_cmd = (struct tx_cmd *) cmd;
1078 tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
1079
1080 frames_out++;
1081 if (cmd->status & CMD_STAT_OK) {
1082 if (i596_debug)
1083 print_eth(pa_to_va(tx_cmd_tbd->pa_data));
1084 } else {
1085 lp->stats.tx_errors++;
1086 if (i596_debug)
1087 printk("transmission failure:%04x\n",
1088 cmd->status);
1089 if (cmd->status & 0x0020)
1090 lp->stats.collisions++;
1091 if (!(cmd->status & 0x0040))
1092 lp->stats.tx_heartbeat_errors++;
1093 if (cmd->status & 0x0400)
1094 lp->stats.tx_carrier_errors++;
1095 if (cmd->status & 0x0800)
1096 lp->stats.collisions++;
1097 if (cmd->status & 0x1000)
1098 lp->stats.tx_aborted_errors++;
1099 }
1100 dev_kfree_skb_irq(tx_cmd_tbd->skb);
1101
1102 cmd->pa_next = I596_NULL;
1103 kfree((unsigned char *)tx_cmd);
1104 netif_wake_queue(dev);
1105 break;
1106 }
1107
1108 case CmdMulticastList:
1109 cmd->pa_next = I596_NULL;
1110 kfree((unsigned char *)cmd);
1111 break;
1112
1113 case CmdTDR:
1114 {
1115 unsigned long status = *((unsigned long *) (cmd + 1));
1116 if (status & 0x8000) {
1117 if (i596_debug)
1118 printk("%s: link ok.\n", dev->name);
1119 } else {
1120 if (status & 0x4000)
1121 printk("%s: Transceiver problem.\n",
1122 dev->name);
1123 if (status & 0x2000)
1124 printk("%s: Termination problem.\n",
1125 dev->name);
1126 if (status & 0x1000)
1127 printk("%s: Short circuit.\n",
1128 dev->name);
1129 printk("%s: Time %ld.\n",
1130 dev->name, status & 0x07ff);
1131 }
1132 }
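			/* falls through: the default arm below also clears
			   pa_next and stamps last_cmd for CmdTDR */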
1133 default:
1134 cmd->pa_next = I596_NULL;
1135 lp->last_cmd = jiffies;
1136
1137 }
1138 barrier();
1139 }
1140
1141 cmd = lp->cmd_head;
1142 while (cmd && (cmd != lp->cmd_tail)) {
1143 cmd->command &= 0x1fff;
1144 cmd = pa_to_va(cmd->pa_next);
1145 barrier();
1146 }
1147
1148 if (lp->cmd_head)
1149 *ack_cmdp |= CUC_START;
1150 lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
1151 spin_unlock_irqrestore(&lp->cmd_lock, flags);
1152}
1153
1154static irqreturn_t
1155i596_interrupt (int irq, void *dev_instance, struct pt_regs *regs) {
1156 struct net_device *dev = (struct net_device *) dev_instance;
1157 struct i596_private *lp;
1158 unsigned short status, ack_cmd = 0;
1159 int frames_in = 0;
1160
1161 lp = (struct i596_private *) dev->priv;
1162
1163 /*
1164 * The 82596 examines the command, performs the required action,
1165 * and then clears the SCB command word.
1166 */
1167 if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
1168 ;
1169
1170 /*
1171 * The status word indicates the status of the 82596.
1172 * It is modified only by the 82596.
1173 *
1174 * [So, we must not clear it. I find often status 0xffff,
1175 * which is not one of the values allowed by the docs.]
1176 */
1177 status = lp->scb.status;
1178#if 0
1179 if (i596_debug) {
1180 printk("%s: i596 interrupt, ", dev->name);
1181 i596_out_status(status);
1182 }
1183#endif
1184 /* Impossible, but it happens - perhaps when we get
1185 a receive interrupt but scb.pa_rfd is I596_NULL. */
1186 if (status == 0xffff) {
1187 printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
1188 goto out;
1189 }
1190
1191 ack_cmd = (status & STAT_ACK);
1192
1193 if (status & (STAT_CX | STAT_CNA))
1194 i596_handle_CU_completion(dev, lp, status, &ack_cmd);
1195
1196 if (status & (STAT_FR | STAT_RNR)) {
1197 /* Restart the receive unit when it got inactive somehow */
1198 if ((status & STAT_RNR) && netif_running(dev))
1199 ack_cmd |= RX_START;
1200
1201 if (status & STAT_FR) {
1202 frames_in = i596_rx(dev);
1203 if (!frames_in)
1204 printk("receive frame reported, but no frames\n");
1205 }
1206 }
1207
1208 /* acknowledge the interrupt */
1209 /*
1210 if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
1211 ack_cmd |= CUC_START;
1212 */
1213
1214 if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
1215 ;
1216
1217 lp->scb.command = ack_cmd;
1218
1219 CLEAR_INT();
1220 CA();
1221
1222 out:
1223 return IRQ_HANDLED;
1224}
1225
1226static int i596_close(struct net_device *dev) {
1227 struct i596_private *lp = dev->priv;
1228
1229 netif_stop_queue(dev);
1230
1231 if (i596_debug)
1232 printk("%s: Shutting down ethercard, status was %4.4x.\n",
1233 dev->name, lp->scb.status);
1234
1235 lp->scb.command = (CUC_ABORT | RX_ABORT);
1236 CA();
1237
1238 i596_cleanup_cmd(dev);
1239
1240 if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
1241 ;
1242
1243 free_irq(dev->irq, dev);
1244 remove_rx_bufs(dev);
1245
1246 return 0;
1247}
1248
1249static struct net_device_stats * i596_get_stats(struct net_device *dev) {
1250 struct i596_private *lp = dev->priv;
1251
1252 return &lp->stats;
1253}
1254
1255/*
1256* Set or clear the multicast filter for this adaptor.
1257*/
1258
1259static void set_multicast_list(struct net_device *dev) {
1260 struct i596_private *lp = dev->priv;
1261 struct i596_cmd *cmd;
1262
1263 if (i596_debug > 1)
1264 printk ("%s: set multicast list %d\n",
1265 dev->name, dev->mc_count);
1266
1267 if (dev->mc_count > 0) {
1268 struct dev_mc_list *dmi;
1269 char *cp;
1270 cmd = (struct i596_cmd *)kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
1271 if (cmd == NULL) {
1272 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
1273 return;
1274 }
1275 cmd->command = CmdMulticastList;
1276 *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
1277 cp = ((char *)(cmd + 1))+2;
1278 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
1279 memcpy(cp, dmi->dmi_addr, 6);
1280 cp += 6;
1281 }
1282 if (i596_debug & LOG_SRCDST)
1283 print_eth (((char *)(cmd + 1)) + 2);
1284 i596_add_cmd(dev, cmd);
1285 } else {
1286 if (lp->set_conf.pa_next != I596_NULL) {
1287 return;
1288 }
1289 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1290 if (dev->flags & IFF_ALLMULTI)
1291 dev->flags |= IFF_PROMISC;
1292 lp->i596_config[8] &= ~0x01;
1293 } else {
1294 lp->i596_config[8] |= 0x01;
1295 }
1296
1297 i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
1298 }
1299}
1300
1301MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
1302MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
1303MODULE_LICENSE("GPL");
1304
1305static struct net_device *dev_lp486e;
1306static int full_duplex;
1307static int options;
1308static int io = IOADDR;
1309static int irq = IRQ;
1310
1311module_param(debug, int, 0);
1312//module_param(max_interrupt_work, int, 0);
1313//module_param(reverse_probe, int, 0);
1314//module_param(rx_copybreak, int, 0);
1315module_param(options, int, 0);
1316module_param(full_duplex, int, 0);
1317
1318static int __init lp486e_init_module(void) {
1319 int err;
1320 struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
1321 if (!dev)
1322 return -ENOMEM;
1323
1324 dev->irq = irq;
1325 dev->base_addr = io;
1326 err = lp486e_probe(dev);
1327 if (err) {
1328 free_netdev(dev);
1329 return err;
1330 }
1331 err = register_netdev(dev);
1332 if (err) {
1333 release_region(dev->base_addr, LP486E_TOTAL_SIZE);
1334 free_netdev(dev);
1335 return err;
1336 }
1337 dev_lp486e = dev;
1338 full_duplex = 0;
1339 options = 0;
1340 return 0;
1341}
1342
1343static void __exit lp486e_cleanup_module(void) {
1344 unregister_netdev(dev_lp486e);
1345 release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
1346 free_netdev(dev_lp486e);
1347}
1348
1349module_init(lp486e_init_module);
1350module_exit(lp486e_cleanup_module);