lib8390: Convert include <asm to include <linux
[deliverable/linux.git] / drivers / net / lib8390.c
1 /* 8390.c: A general NS8390 ethernet driver core for linux. */
2 /*
3 Written 1992-94 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
10
11 The author may be reached as becker@scyld.com, or C/O
12 Scyld Computing Corporation
13 410 Severn Ave., Suite 210
14 Annapolis MD 21403
15
16
17 This is the chip-specific code for many 8390-based ethernet adaptors.
18 This is not a complete driver, it must be combined with board-specific
19 code such as ne.c, wd.c, 3c503.c, etc.
20
21 Seeing how at least eight drivers use this code, (not counting the
22 PCMCIA ones either) it is easy to break some card by what seems like
23 a simple innocent change. Please contact me or Donald if you think
24 you have found something that needs changing. -- PG
25
26
27 Changelog:
28
29 Paul Gortmaker : remove set_bit lock, other cleanups.
30 Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
31 ei_block_input() for eth_io_copy_and_sum().
32 Paul Gortmaker : exchange static int ei_pingpong for a #define,
33 also add better Tx error handling.
34 Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
35 Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
36 Paul Gortmaker : tweak ANK's above multicast changes a bit.
37 Paul Gortmaker : update packet statistics for v2.1.x
38 Alan Cox : support arbitrary stupid port mappings on the
39 68K Macintosh. Support >16bit I/O spaces
40 Paul Gortmaker : add kmod support for auto-loading of the 8390
41 module by all drivers that require it.
42 Alan Cox : Spinlocking work, added 'BUG_83C690'
43 Paul Gortmaker : Separate out Tx timeout code from Tx path.
44 Paul Gortmaker : Remove old unused single Tx buffer code.
45 Hayato Fujiwara : Add m32r support.
46 Paul Gortmaker : use skb_padto() instead of stack scratch area
47
48 Sources:
49 The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
50
51 */
52
53 #include <linux/module.h>
54 #include <linux/kernel.h>
55 #include <linux/jiffies.h>
56 #include <linux/fs.h>
57 #include <linux/types.h>
58 #include <linux/string.h>
59 #include <linux/bitops.h>
60 #include <asm/system.h>
61 #include <linux/uaccess.h>
62 #include <linux/io.h>
63 #include <asm/irq.h>
64 #include <linux/delay.h>
65 #include <linux/errno.h>
66 #include <linux/fcntl.h>
67 #include <linux/in.h>
68 #include <linux/interrupt.h>
69 #include <linux/init.h>
70 #include <linux/crc32.h>
71
72 #include <linux/netdevice.h>
73 #include <linux/etherdevice.h>
74
75 #define NS8390_CORE
76 #include "8390.h"
77
78 #define BUG_83C690
79
80 /* These are the operational function interfaces to board-specific
81 routines.
82 void reset_8390(struct net_device *dev)
83 Resets the board associated with DEV, including a hardware reset of
84 the 8390. This is only called when there is a transmit timeout, and
85 it is always followed by 8390_init().
86 void block_output(struct net_device *dev, int count, const unsigned char *buf,
87 int start_page)
88 Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
89 "page" value uses the 8390's 256-byte pages.
90 void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
91 Read the 4 byte, page aligned 8390 header. *If* there is a
92 subsequent read, it will be of the rest of the packet.
93 void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
94 Read COUNT bytes from the packet buffer into the skb data area. Start
95 reading from RING_OFFSET, the address as the 8390 sees it. This will always
96 follow the read of the 8390 header.
97 */
98 #define ei_reset_8390 (ei_local->reset_8390)
99 #define ei_block_output (ei_local->block_output)
100 #define ei_block_input (ei_local->block_input)
101 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
102
103 /* use 0 for production, 1 for verification, >2 for debug */
104 #ifndef ei_debug
105 int ei_debug = 1;
106 #endif
107
108 /* Index to functions. */
109 static void ei_tx_intr(struct net_device *dev);
110 static void ei_tx_err(struct net_device *dev);
111 void ei_tx_timeout(struct net_device *dev);
112 static void ei_receive(struct net_device *dev);
113 static void ei_rx_overrun(struct net_device *dev);
114
115 /* Routines generic to NS8390-based boards. */
116 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
117 int start_page);
118 static void do_set_multicast_list(struct net_device *dev);
119 static void __NS8390_init(struct net_device *dev, int startp);
120
121 /*
122 * SMP and the 8390 setup.
123 *
124 * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
125 * a page register that controls bank and packet buffer access. We guard
126 * this with ei_local->page_lock. Nobody should assume or set the page other
127 * than zero when the lock is not held. Lock holders must restore page 0
128 * before unlocking. Even pure readers must take the lock to protect in
129 * page 0.
130 *
131 * To make life difficult the chip can also be very slow. We therefore can't
132 * just use spinlocks. For the longer lockups we disable the irq the device
133 * sits on and hold the lock. We must hold the lock because there is a dual
134 * processor case other than interrupts (get stats/set multicast list in
135 * parallel with each other and transmit).
136 *
137 * Note: in theory we can just disable the irq on the card _but_ there is
138 * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
139 * enter lock, take the queued irq. So we waddle instead of flying.
140 *
141 * Finally by special arrangement for the purpose of being generally
142 * annoying the transmit function is called bh atomic. That places
143 * restrictions on the user context callers as disable_irq won't save
144 * them.
145 *
146 * Additional explanation of problems with locking by Alan Cox:
147 *
148 * "The author (me) didn't use spin_lock_irqsave because the slowness of the
149 * card means that approach caused horrible problems like losing serial data
150 * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
151 * chips with FPGA front ends.
152 *
153 * Ok the logic behind the 8390 is very simple:
154 *
155 * Things to know
156 * - IRQ delivery is asynchronous to the PCI bus
157 * - Blocking the local CPU IRQ via spin locks was too slow
158 * - The chip has register windows needing locking work
159 *
160 * So the path was once (I say once as people appear to have changed it
161 * in the mean time and it now looks rather bogus if the changes to use
162 * disable_irq_nosync_irqsave are disabling the local IRQ)
163 *
164 *
165 * Take the page lock
166 * Mask the IRQ on chip
167 * Disable the IRQ (but not mask locally- someone seems to have
168 * broken this with the lock validator stuff)
169 * [This must be _nosync as the page lock may otherwise
170 * deadlock us]
171 * Drop the page lock and turn IRQs back on
172 *
173 * At this point an existing IRQ may still be running but we can't
174 * get a new one
175 *
176 * Take the lock (so we know the IRQ has terminated) but don't mask
177 * the IRQs on the processor
178 * Set irqlock [for debug]
179 *
180 * Transmit (slow as ****)
181 *
182 * re-enable the IRQ
183 *
184 *
185 * We have to use disable_irq because otherwise you will get delayed
186 * interrupts on the APIC bus deadlocking the transmit path.
187 *
188 * Quite hairy but the chip simply wasn't designed for SMP and you can't
189 * even ACK an interrupt without risking corrupting other parallel
190 * activities on the chip." [lkml, 25 Jul 2007]
191 */
192
193
194
195 /**
196 * ei_open - Open/initialize the board.
197 * @dev: network device to initialize
198 *
199 * This routine goes all-out, setting everything
200 * up anew at each open, even though many of these registers should only
201 * need to be set once at boot.
202 */
203 static int __ei_open(struct net_device *dev)
204 {
205 unsigned long flags;
206 struct ei_device *ei_local = netdev_priv(dev);
207
208 if (dev->watchdog_timeo <= 0)
209 dev->watchdog_timeo = TX_TIMEOUT;
210
211 /*
212 * Grab the page lock so we own the register set, then call
213 * the init function.
214 */
215
216 spin_lock_irqsave(&ei_local->page_lock, flags);
217 __NS8390_init(dev, 1);
218 /* Set the flag before we drop the lock, That way the IRQ arrives
219 after its set and we get no silly warnings */
220 netif_start_queue(dev);
221 spin_unlock_irqrestore(&ei_local->page_lock, flags);
222 ei_local->irqlock = 0;
223 return 0;
224 }
225
226 /**
227 * ei_close - shut down network device
228 * @dev: network device to close
229 *
230 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
231 */
232 static int __ei_close(struct net_device *dev)
233 {
234 struct ei_device *ei_local = netdev_priv(dev);
235 unsigned long flags;
236
237 /*
238 * Hold the page lock during close
239 */
240
241 spin_lock_irqsave(&ei_local->page_lock, flags);
242 __NS8390_init(dev, 0);
243 spin_unlock_irqrestore(&ei_local->page_lock, flags);
244 netif_stop_queue(dev);
245 return 0;
246 }
247
/**
 * ei_tx_timeout - handle transmit time out condition
 * @dev: network device which has apparently fallen asleep
 *
 * Called by kernel when device never acknowledges a transmit has
 * completed (or failed) - i.e. never posted a Tx related interrupt.
 * Snapshots the chip's TSR/ISR for diagnostics, then resets and
 * reinitialises the 8390 and restarts the queue.
 */

static void __ei_tx_timeout(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	/* Read TSR/ISR under the page lock so a window flip on another
	   CPU cannot corrupt the reads. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card. Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
293
/**
 * ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Sends a packet to an 8390 network device.
 * Returns NETDEV_TX_OK when the skb has been consumed, or
 * NETDEV_TX_BUSY when both on-card Tx buffers are already in use
 * (the skb is left with the caller for requeueing).
 */

static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	/* Pad runt frames to the Ethernet minimum from a zeroed stack
	   buffer so we never upload stale stack bytes to the wire. */
	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter not disabling irqs if we don't lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 * Slow phase with lock held.  The chip IRQ is disabled (not just
	 * masked) because the upload below is slow; see the locking essay
	 * at the top of this file.
	 */

	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	/* Flag for the IRQ handler that the chip IRQ mask is down. */
	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */

	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if (ei_debug && ei_local->tx2 > 0)
			netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		/* Second slot lives half-way through the Tx page area. */
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if (ei_debug && ei_local->tx1 > 0)
			netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		if (ei_debug)
			netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
				   ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		/* Undo everything: re-enable chip interrupts, unlock,
		   and report busy so the stack requeues the skb. */
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		/* -1/-2 in tx1/tx2 and lasttx mark "being transmitted";
		   ei_tx_intr() checks these on completion. */
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	/* Both slots occupied -> stop the queue until a Tx completes. */
	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_kfree_skb(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
410
/**
 * ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Handle the ether interface interrupts. We pull packets from
 * the 8390 via the card specific functions and fire them at the networking
 * stack. We also handle transmit completions and wake the transmit path if
 * necessary. We also update the counters and do other housekeeping as
 * needed.
 *
 * Returns IRQ_HANDLED if any events were serviced, IRQ_NONE otherwise
 * (e.g. a shared-line interrupt meant for another device).
 */

static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 * Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * The Tx path has masked the chip IRQ (irqlock set), so a
		 * real interrupt from us should be impossible.  This might
		 * just be an interrupt for a PCI device sharing this line.
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
			   ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0. Don't break this. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* rmk - acknowledge the interrupts */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		/* Overrun takes priority over normal receive: the chip
		   must be restarted per the NS recipe in ei_rx_overrun(). */
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			/* Fold the chip's three tally counters into the
			   software stats, then ack the counter interrupt. */
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	/* Leftover status bits after the service loop: either we hit the
	   MAX_SERVICE limit or the bits are ones we don't handle. */
	if (interrupts && ei_debug) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}
506
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for contexts where normal IRQ delivery is not
 * available (CONFIG_NET_POLL_CONTROLLER): mask the device IRQ, run the
 * regular interrupt handler once by hand, then unmask.
 */
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
515
/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened. Most likely excess collisions (which
 * is a fairly normal condition). If the error is one where the Tx will
 * have been aborted, we try and send another one right away, instead of
 * letting the failed packet sit and collect dust in the Tx buffer. This
 * is a much better solution as it avoids kernel based Tx timeouts, and
 * an unnecessary card reset.
 *
 * Called with lock held.
 */

static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	/* ABT (excess collisions) or FU (FIFO underrun) means the frame
	   was aborted, so it can be resent via the normal Tx-done path. */
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	/* Decode each TSR error bit for debugging. */
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		/* Reuse the Tx-done handler to kick the next packet out. */
		ei_tx_intr(dev);
	else {
		/* Non-abort error: just account for it in the stats. */
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
567
568 /**
569 * ei_tx_intr - transmit interrupt handler
570 * @dev: network device for which tx intr is handled
571 *
572 * We have finished a transmit: check for errors and then trigger the next
573 * packet to be sent. Called with lock held.
574 */
575
576 static void ei_tx_intr(struct net_device *dev)
577 {
578 unsigned long e8390_base = dev->base_addr;
579 struct ei_device *ei_local = netdev_priv(dev);
580 int status = ei_inb(e8390_base + EN0_TSR);
581
582 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
583
584 /*
585 * There are two Tx buffers, see which one finished, and trigger
586 * the send of another one if it exists.
587 */
588 ei_local->txqueue--;
589
590 if (ei_local->tx1 < 0) {
591 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
592 pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
593 ei_local->name, ei_local->lasttx, ei_local->tx1);
594 ei_local->tx1 = 0;
595 if (ei_local->tx2 > 0) {
596 ei_local->txing = 1;
597 NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
598 dev->trans_start = jiffies;
599 ei_local->tx2 = -1,
600 ei_local->lasttx = 2;
601 } else
602 ei_local->lasttx = 20, ei_local->txing = 0;
603 } else if (ei_local->tx2 < 0) {
604 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
605 pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
606 ei_local->name, ei_local->lasttx, ei_local->tx2);
607 ei_local->tx2 = 0;
608 if (ei_local->tx1 > 0) {
609 ei_local->txing = 1;
610 NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
611 dev->trans_start = jiffies;
612 ei_local->tx1 = -1;
613 ei_local->lasttx = 1;
614 } else
615 ei_local->lasttx = 10, ei_local->txing = 0;
616 } /* else
617 netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
618 ei_local->lasttx);
619 */
620
621 /* Minimize Tx latency: update the statistics after we restart TXing. */
622 if (status & ENTSR_COL)
623 dev->stats.collisions++;
624 if (status & ENTSR_PTX)
625 dev->stats.tx_packets++;
626 else {
627 dev->stats.tx_errors++;
628 if (status & ENTSR_ABT) {
629 dev->stats.tx_aborted_errors++;
630 dev->stats.collisions += 16;
631 }
632 if (status & ENTSR_CRS)
633 dev->stats.tx_carrier_errors++;
634 if (status & ENTSR_FU)
635 dev->stats.tx_fifo_errors++;
636 if (status & ENTSR_CDH)
637 dev->stats.tx_heartbeat_errors++;
638 if (status & ENTSR_OWC)
639 dev->stats.tx_window_errors++;
640 }
641 netif_wake_queue(dev);
642 }
643
/**
 * ei_receive - receive some packets
 * @dev: network device with which receive will be run
 *
 * We have a good packet(s), get it/them out of the buffers.
 * Called with lock held.  Drains at most 9 frames per call so one
 * busy receiver cannot monopolise the interrupt handler.
 */

static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer).  CURPAG lives
		   in register page 1; flip there and back to page 0. */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring. Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone claimed to have a problem.)

		   Keep quiet if it looks like a card removal. One problem here
		   is that some clones crash in roughly the same way.
		 */
		if (ei_debug > 0 &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		/* Ring pages are 256 bytes; the byte offset of the frame
		   is simply the page number shifted up. */
		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		/* Predict where the frame after this one should start:
		   header page + payload pages (+4 covers the 4-byte header). */
		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written. This happened a lot during testing! This code should be
		   cleaned up someday. */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			/* Header looks corrupt: resync to the chip's own
			   write pointer and drop everything up to it. */
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			/* Outside legal Ethernet frame sizes: count and skip. */
			if (ei_debug)
				netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
					   rx_frame.count, rx_frame.status,
					   rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL) {
				if (ei_debug > 1)
					netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
						   pkt_len);
				dev->stats.rx_dropped++;
				/* Out of memory: leave remaining frames on
				   the card and bail out of the loop. */
				break;
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			if (ei_debug)
				netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
					   rx_frame.status, rx_frame.next,
					   rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		/* Advance our pointer and the chip's boundary (kept one
		   page behind, as required above). */
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}
766
/**
 * ei_rx_overrun - handle receiver overrun
 * @dev: network device which threw exception
 *
 * We have a receiver overrun: we have to kick the 8390 to get it started
 * again. Problem is that you have to kick it exactly as NS prescribes in
 * the updated datasheets, or "the NIC may act in an unpredictable manner."
 * This includes causing "the NIC to defer indefinitely when it is stopped
 * on a busy network."  Ugh.
 * Called with lock held. Don't call this with the interrupts off or your
 * computer will hate you - it takes 10ms or so.
 *
 * NOTE(review): the step order below follows the NS recovery recipe;
 * do not reorder these register accesses.
 */

static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		netdev_dbg(dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
	 * We wait at least 10ms.
	 */

	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not. According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */

	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
844
845 /*
846 * Collect the stats. This is called unlocked and from several contexts.
847 */
848
849 static struct net_device_stats *__ei_get_stats(struct net_device *dev)
850 {
851 unsigned long ioaddr = dev->base_addr;
852 struct ei_device *ei_local = netdev_priv(dev);
853 unsigned long flags;
854
855 /* If the card is stopped, just return the present stats. */
856 if (!netif_running(dev))
857 return &dev->stats;
858
859 spin_lock_irqsave(&ei_local->page_lock, flags);
860 /* Read the counter registers, assuming we are in page 0. */
861 dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
862 dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
863 dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
864 spin_unlock_irqrestore(&ei_local->page_lock, flags);
865
866 return &dev->stats;
867 }
868
869 /*
870 * Form the 64 bit 8390 multicast table from the linked list of addresses
871 * associated with this dev structure.
872 */
873
874 static inline void make_mc_bits(u8 *bits, struct net_device *dev)
875 {
876 struct netdev_hw_addr *ha;
877
878 netdev_for_each_mc_addr(ha, dev) {
879 u32 crc = ether_crc(ETH_ALEN, ha->addr);
880 /*
881 * The 8390 uses the 6 most significant bits of the
882 * CRC to index the multicast table.
883 */
884 bits[crc>>29] |= (1<<((crc>>26)&7));
885 }
886 }
887
/**
 * do_set_multicast_list - set/clear multicast filter
 * @dev: net device for which multicast filter is adjusted
 *
 * Set or clear the multicast filter for this adaptor. May be called
 * from a BH in 2.1.x. Must be called with lock held.
 */

static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Build the 8-byte hash filter: all-ones for promiscuous or
	   all-multicast mode, otherwise a hash of the current MC list. */
	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */

	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	/* The multicast filter registers live in register page 1. */
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		/* Read-back check; disabled by default because the 83C690's
		   MC regs are write-only (see Bug Alert above). */
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	/* Pick the receive mode.  NOTE(review): 0x18 / 0x08 look like the
	   RXCR promiscuous / accept-multicast mode bits — confirm against
	   the DP8390 RXCR bit definitions before changing. */
	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
942
943 /*
944 * Called without lock held. This is invoked from user context and may
945 * be parallel to just about everything else. Its also fairly quick and
946 * not called too often. Must protect against both bh and irq users
947 */
948
949 static void __ei_set_multicast_list(struct net_device *dev)
950 {
951 unsigned long flags;
952 struct ei_device *ei_local = netdev_priv(dev);
953
954 spin_lock_irqsave(&ei_local->page_lock, flags);
955 do_set_multicast_list(dev);
956 spin_unlock_irqrestore(&ei_local->page_lock, flags);
957 }
958
959 /**
960 * ethdev_setup - init rest of 8390 device struct
961 * @dev: network device structure to init
962 *
963 * Initialize the rest of the 8390 device structure. Do NOT __init
964 * this, as it is used by 8390 based modular drivers too.
965 */
966
967 static void ethdev_setup(struct net_device *dev)
968 {
969 struct ei_device *ei_local = netdev_priv(dev);
970 if (ei_debug > 1)
971 printk(version);
972
973 ether_setup(dev);
974
975 spin_lock_init(&ei_local->page_lock);
976 }
977
978 /**
979 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
980 * @size: extra bytes to allocate
981 *
982 * Allocate 8390-specific net_device.
983 */
984 static struct net_device *____alloc_ei_netdev(int size)
985 {
986 return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
987 ethdev_setup);
988 }
989
990
991
992
993 /* This page of functions should be 8390 generic */
994 /* Follow National Semi's recommendations for initializing the "NIC". */
995
996 /**
997 * NS8390_init - initialize 8390 hardware
998 * @dev: network device to initialize
999 * @startp: boolean. non-zero value to initiate chip processing
1000 *
1001 * Must be called with lock held.
1002 */
1003
1004 static void __NS8390_init(struct net_device *dev, int startp)
1005 {
1006 unsigned long e8390_base = dev->base_addr;
1007 struct ei_device *ei_local = netdev_priv(dev);
1008 int i;
1009 int endcfg = ei_local->word16
1010 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1011 : 0x48;
1012
1013 if (sizeof(struct e8390_pkt_hdr) != 4)
1014 panic("8390.c: header struct mispacked\n");
1015 /* Follow National Semi's recommendations for initing the DP83902. */
1016 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1017 ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
1018 /* Clear the remote byte count registers. */
1019 ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
1020 ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
1021 /* Set to monitor and loopback mode -- this is vital!. */
1022 ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1023 ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1024 /* Set the transmit page and receive ring. */
1025 ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1026 ei_local->tx1 = ei_local->tx2 = 0;
1027 ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
1028 ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
1029 ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
1030 ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1031 /* Clear the pending interrupts and mask. */
1032 ei_outb_p(0xFF, e8390_base + EN0_ISR);
1033 ei_outb_p(0x00, e8390_base + EN0_IMR);
1034
1035 /* Copy the station address into the DS8390 registers. */
1036
1037 ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1038 for (i = 0; i < 6; i++) {
1039 ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1040 if (ei_debug > 1 &&
1041 ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
1042 netdev_err(dev, "Hw. address read/write mismap %d\n", i);
1043 }
1044
1045 ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1046 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1047
1048 ei_local->tx1 = ei_local->tx2 = 0;
1049 ei_local->txing = 0;
1050
1051 if (startp) {
1052 ei_outb_p(0xff, e8390_base + EN0_ISR);
1053 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1054 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1055 ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1056 /* 3c503 TechMan says rxconfig only after the NIC is started. */
1057 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
1058 do_set_multicast_list(dev); /* (re)load the mcast table */
1059 }
1060 }
1061
1062 /* Trigger a transmit start, assuming the length is valid.
1063 Always called with the page lock held */
1064
1065 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1066 int start_page)
1067 {
1068 unsigned long e8390_base = dev->base_addr;
1069 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1070
1071 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1072
1073 if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1074 netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1075 return;
1076 }
1077 ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1078 ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1079 ei_outb_p(start_page, e8390_base + EN0_TPSR);
1080 ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1081 }
This page took 0.05547 seconds and 6 git commands to generate.