lib8390: Use pr_<level> and netdev_<level>
1 /* 8390.c: A general NS8390 ethernet driver core for linux. */
2 /*
3 Written 1992-94 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
10
11 The author may be reached as becker@scyld.com, or C/O
12 Scyld Computing Corporation
13 410 Severn Ave., Suite 210
14 Annapolis MD 21403
15
16
17 This is the chip-specific code for many 8390-based ethernet adaptors.
18 This is not a complete driver, it must be combined with board-specific
19 code such as ne.c, wd.c, 3c503.c, etc.
20
 21 	Seeing as at least eight drivers use this code (not counting the
 22 	PCMCIA ones), it is easy to break some card with what seems like
 23 	a simple innocent change. Please contact me or Donald if you think
 24 	you have found something that needs changing. -- PG
25
26
27 Changelog:
28
29 Paul Gortmaker : remove set_bit lock, other cleanups.
30 Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
31 ei_block_input() for eth_io_copy_and_sum().
32 Paul Gortmaker : exchange static int ei_pingpong for a #define,
33 also add better Tx error handling.
34 Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
35 Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
36 Paul Gortmaker : tweak ANK's above multicast changes a bit.
37 Paul Gortmaker : update packet statistics for v2.1.x
38 Alan Cox : support arbitrary stupid port mappings on the
39 68K Macintosh. Support >16bit I/O spaces
40 Paul Gortmaker : add kmod support for auto-loading of the 8390
41 module by all drivers that require it.
42 Alan Cox : Spinlocking work, added 'BUG_83C690'
43 Paul Gortmaker : Separate out Tx timeout code from Tx path.
44 Paul Gortmaker : Remove old unused single Tx buffer code.
45 Hayato Fujiwara : Add m32r support.
46 Paul Gortmaker : use skb_padto() instead of stack scratch area
47
48 Sources:
49 The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
50
51 */
52
53 #include <linux/module.h>
54 #include <linux/kernel.h>
55 #include <linux/jiffies.h>
56 #include <linux/fs.h>
57 #include <linux/types.h>
58 #include <linux/string.h>
59 #include <linux/bitops.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63 #include <asm/irq.h>
64 #include <linux/delay.h>
65 #include <linux/errno.h>
66 #include <linux/fcntl.h>
67 #include <linux/in.h>
68 #include <linux/interrupt.h>
69 #include <linux/init.h>
70 #include <linux/crc32.h>
71
72 #include <linux/netdevice.h>
73 #include <linux/etherdevice.h>
74
75 #define NS8390_CORE
76 #include "8390.h"
77
78 #define BUG_83C690
79
80 /* These are the operational function interfaces to board-specific
81 routines.
82 void reset_8390(struct net_device *dev)
83 Resets the board associated with DEV, including a hardware reset of
84 the 8390. This is only called when there is a transmit timeout, and
85 it is always followed by 8390_init().
86 void block_output(struct net_device *dev, int count, const unsigned char *buf,
87 int start_page)
88 Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
89 "page" value uses the 8390's 256-byte pages.
 90 void get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
91 Read the 4 byte, page aligned 8390 header. *If* there is a
92 subsequent read, it will be of the rest of the packet.
93 void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
94 Read COUNT bytes from the packet buffer into the skb data area. Start
95 reading from RING_OFFSET, the address as the 8390 sees it. This will always
96 follow the read of the 8390 header.
97 */
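/*
 * Illustrative sketch only (kept out of the build with #if 0, not part of
 * this driver): roughly how a board-specific driver is expected to hook
 * into this core.  The my_*() helpers are hypothetical stand-ins for the
 * real bus I/O routines found in e.g. ne.c or wd.c.
 */
#if 0
static void my_reset_8390(struct net_device *dev)
{
	/* Board-specific hardware reset of the 8390; always followed by a
	   call back into the 8390 init code. */
}

static void my_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
			    int ring_page)
{
	/* Read the 4 byte header the 8390 wrote at the start of RING_PAGE. */
}

static void my_block_input(struct net_device *dev, int count,
			   struct sk_buff *skb, int ring_offset)
{
	/* Copy COUNT bytes from the card's ring buffer at RING_OFFSET into
	   the skb data area. */
}

static void my_block_output(struct net_device *dev, int count,
			    const unsigned char *buf, int start_page)
{
	/* Copy COUNT bytes from BUF into the card's transmit buffer at
	   START_PAGE (256 byte pages). */
}

static void my_wire_up_hooks(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	ei_local->reset_8390   = my_reset_8390;
	ei_local->get_8390_hdr = my_get_8390_hdr;
	ei_local->block_input  = my_block_input;
	ei_local->block_output = my_block_output;
}
#endif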
98 #define ei_reset_8390 (ei_local->reset_8390)
99 #define ei_block_output (ei_local->block_output)
100 #define ei_block_input (ei_local->block_input)
101 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
102
103 /* use 0 for production, 1 for verification, >2 for debug */
104 #ifndef ei_debug
105 int ei_debug = 1;
106 #endif
107
108 /* Index to functions. */
109 static void ei_tx_intr(struct net_device *dev);
110 static void ei_tx_err(struct net_device *dev);
111 void ei_tx_timeout(struct net_device *dev);
112 static void ei_receive(struct net_device *dev);
113 static void ei_rx_overrun(struct net_device *dev);
114
115 /* Routines generic to NS8390-based boards. */
116 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
117 int start_page);
118 static void do_set_multicast_list(struct net_device *dev);
119 static void __NS8390_init(struct net_device *dev, int startp);
120
121 /*
122 * SMP and the 8390 setup.
123 *
124 * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
125 * a page register that controls bank and packet buffer access. We guard
126 * this with ei_local->page_lock. Nobody should assume or set the page other
127 * than zero when the lock is not held. Lock holders must restore page 0
 128  * before unlocking. Even pure readers must take the lock so they can
 129  * rely on the chip being in page 0.
130 *
131 * To make life difficult the chip can also be very slow. We therefore can't
132 * just use spinlocks. For the longer lockups we disable the irq the device
133 * sits on and hold the lock. We must hold the lock because there is a dual
134 * processor case other than interrupts (get stats/set multicast list in
135 * parallel with each other and transmit).
136 *
137 * Note: in theory we can just disable the irq on the card _but_ there is
138 * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
139 * enter lock, take the queued irq. So we waddle instead of flying.
140 *
 141  * Finally, by special arrangement for the purpose of being generally
 142  * annoying, the transmit function is called bh atomic. That places
143 * restrictions on the user context callers as disable_irq won't save
144 * them.
145 *
146 * Additional explanation of problems with locking by Alan Cox:
147 *
148 * "The author (me) didn't use spin_lock_irqsave because the slowness of the
149 * card means that approach caused horrible problems like losing serial data
150 * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
151 * chips with FPGA front ends.
152 *
153 * Ok the logic behind the 8390 is very simple:
154 *
155 * Things to know
156 * - IRQ delivery is asynchronous to the PCI bus
157 * - Blocking the local CPU IRQ via spin locks was too slow
158 * - The chip has register windows needing locking work
159 *
160 * So the path was once (I say once as people appear to have changed it
161 * in the mean time and it now looks rather bogus if the changes to use
162 * disable_irq_nosync_irqsave are disabling the local IRQ)
163 *
164 *
165 * Take the page lock
166 * Mask the IRQ on chip
167 * Disable the IRQ (but not mask locally- someone seems to have
168 * broken this with the lock validator stuff)
169 * [This must be _nosync as the page lock may otherwise
170 * deadlock us]
171 * Drop the page lock and turn IRQs back on
172 *
173 * At this point an existing IRQ may still be running but we can't
174 * get a new one
175 *
176 * Take the lock (so we know the IRQ has terminated) but don't mask
177 * the IRQs on the processor
178 * Set irqlock [for debug]
179 *
180 * Transmit (slow as ****)
181 *
182 * re-enable the IRQ
183 *
184 *
185 * We have to use disable_irq because otherwise you will get delayed
186 * interrupts on the APIC bus deadlocking the transmit path.
187 *
188 * Quite hairy but the chip simply wasn't designed for SMP and you can't
189 * even ACK an interrupt without risking corrupting other parallel
190 * activities on the chip." [lkml, 25 Jul 2007]
191 */
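/*
 * Condensed, illustrative sketch (kept out of the build with #if 0) of the
 * two-phase sequence described above, assuming a hypothetical slow chip
 * operation.  The real users of this pattern in this file are
 * __ei_start_xmit() and __ei_tx_timeout() below.
 */
#if 0
static void example_slow_chip_access(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* Fast phase: mask the chip's interrupt sources under the page lock
	   so an IRQ handler on another CPU cannot flip the register page
	   underneath us. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, dev->base_addr + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/* Slow phase: stop further IRQ delivery (an already queued IRQ may
	   still run, hence _nosync), then take the page lock so any handler
	   in flight has finished before we touch the chip. */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
	spin_lock(&ei_local->page_lock);
	ei_local->irqlock = 1;

	/* ... slow register window / packet buffer work goes here ... */

	/* Unmask the chip and re-enable the IRQ line. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, dev->base_addr + EN0_IMR);
	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
}
#endif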
192
193
194
195 /**
196 * ei_open - Open/initialize the board.
197 * @dev: network device to initialize
198 *
199 * This routine goes all-out, setting everything
200 * up anew at each open, even though many of these registers should only
201 * need to be set once at boot.
202 */
203 static int __ei_open(struct net_device *dev)
204 {
205 unsigned long flags;
206 struct ei_device *ei_local = netdev_priv(dev);
207
208 if (dev->watchdog_timeo <= 0)
209 dev->watchdog_timeo = TX_TIMEOUT;
210
211 /*
212 * Grab the page lock so we own the register set, then call
213 * the init function.
214 */
215
216 spin_lock_irqsave(&ei_local->page_lock, flags);
217 __NS8390_init(dev, 1);
 218 	/* Set the flag before we drop the lock. That way the IRQ arrives
 219 	   after it's set and we get no silly warnings */
220 netif_start_queue(dev);
221 spin_unlock_irqrestore(&ei_local->page_lock, flags);
222 ei_local->irqlock = 0;
223 return 0;
224 }
225
226 /**
227 * ei_close - shut down network device
228 * @dev: network device to close
229 *
230 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
231 */
232 static int __ei_close(struct net_device *dev)
233 {
234 struct ei_device *ei_local = netdev_priv(dev);
235 unsigned long flags;
236
237 /*
238 * Hold the page lock during close
239 */
240
241 spin_lock_irqsave(&ei_local->page_lock, flags);
242 __NS8390_init(dev, 0);
243 spin_unlock_irqrestore(&ei_local->page_lock, flags);
244 netif_stop_queue(dev);
245 return 0;
246 }
247
248 /**
249 * ei_tx_timeout - handle transmit time out condition
250 * @dev: network device which has apparently fallen asleep
251 *
252 * Called by kernel when device never acknowledges a transmit has
253 * completed (or failed) - i.e. never posted a Tx related interrupt.
254 */
255
256 static void __ei_tx_timeout(struct net_device *dev)
257 {
258 unsigned long e8390_base = dev->base_addr;
259 struct ei_device *ei_local = netdev_priv(dev);
260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
261 unsigned long flags;
262
263 dev->stats.tx_errors++;
264
265 spin_lock_irqsave(&ei_local->page_lock, flags);
266 txsr = ei_inb(e8390_base+EN0_TSR);
267 isr = ei_inb(e8390_base+EN0_ISR);
268 spin_unlock_irqrestore(&ei_local->page_lock, flags);
269
270 netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
271 (txsr & ENTSR_ABT) ? "excess collisions." :
272 (isr) ? "lost interrupt?" : "cable problem?",
273 txsr, isr, tickssofar);
274
275 if (!isr && !dev->stats.tx_packets)
276 {
277 /* The 8390 probably hasn't gotten on the cable yet. */
278 ei_local->interface_num ^= 1; /* Try a different xcvr. */
279 }
280
281 /* Ugly but a reset can be slow, yet must be protected */
282
283 disable_irq_nosync_lockdep(dev->irq);
284 spin_lock(&ei_local->page_lock);
285
286 /* Try to restart the card. Perhaps the user has fixed something. */
287 ei_reset_8390(dev);
288 __NS8390_init(dev, 1);
289
290 spin_unlock(&ei_local->page_lock);
291 enable_irq_lockdep(dev->irq);
292 netif_wake_queue(dev);
293 }
294
295 /**
296 * ei_start_xmit - begin packet transmission
297 * @skb: packet to be sent
298 * @dev: network device to which packet is sent
299 *
300 * Sends a packet to an 8390 network device.
301 */
302
303 static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
304 struct net_device *dev)
305 {
306 unsigned long e8390_base = dev->base_addr;
307 struct ei_device *ei_local = netdev_priv(dev);
308 int send_length = skb->len, output_page;
309 unsigned long flags;
310 char buf[ETH_ZLEN];
311 char *data = skb->data;
312
313 if (skb->len < ETH_ZLEN) {
314 memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */
315 memcpy(buf, data, skb->len);
316 send_length = ETH_ZLEN;
317 data = buf;
318 }
319
320 /* Mask interrupts from the ethercard.
321 SMP: We have to grab the lock here otherwise the IRQ handler
322 on another CPU can flip window and race the IRQ mask set. We end
323 up trashing the mcast filter not disabling irqs if we don't lock */
324
325 spin_lock_irqsave(&ei_local->page_lock, flags);
326 ei_outb_p(0x00, e8390_base + EN0_IMR);
327 spin_unlock_irqrestore(&ei_local->page_lock, flags);
328
329
330 /*
331 * Slow phase with lock held.
332 */
333
334 disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
335
336 spin_lock(&ei_local->page_lock);
337
338 ei_local->irqlock = 1;
339
340 /*
341 * We have two Tx slots available for use. Find the first free
342 * slot, and then perform some sanity checks. With two Tx bufs,
343 * you get very close to transmitting back-to-back packets. With
344 * only one Tx buf, the transmitter sits idle while you reload the
345 * card, leaving a substantial gap between each transmitted packet.
346 */
347
348 if (ei_local->tx1 == 0)
349 {
350 output_page = ei_local->tx_start_page;
351 ei_local->tx1 = send_length;
352 if (ei_debug && ei_local->tx2 > 0)
353 netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
354 ei_local->tx2, ei_local->lasttx, ei_local->txing);
355 }
356 else if (ei_local->tx2 == 0)
357 {
358 output_page = ei_local->tx_start_page + TX_PAGES/2;
359 ei_local->tx2 = send_length;
360 if (ei_debug && ei_local->tx1 > 0)
361 netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
362 ei_local->tx1, ei_local->lasttx, ei_local->txing);
363 }
364 else
365 { /* We should never get here. */
366 if (ei_debug)
367 netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
368 ei_local->tx1, ei_local->tx2, ei_local->lasttx);
369 ei_local->irqlock = 0;
370 netif_stop_queue(dev);
371 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
372 spin_unlock(&ei_local->page_lock);
373 enable_irq_lockdep_irqrestore(dev->irq, &flags);
374 dev->stats.tx_errors++;
375 return NETDEV_TX_BUSY;
376 }
377
378 /*
379 * Okay, now upload the packet and trigger a send if the transmitter
380 * isn't already sending. If it is busy, the interrupt handler will
381 * trigger the send later, upon receiving a Tx done interrupt.
382 */
383
384 ei_block_output(dev, send_length, data, output_page);
385
386 if (! ei_local->txing)
387 {
388 ei_local->txing = 1;
389 NS8390_trigger_send(dev, send_length, output_page);
390 if (output_page == ei_local->tx_start_page)
391 {
392 ei_local->tx1 = -1;
393 ei_local->lasttx = -1;
394 }
395 else
396 {
397 ei_local->tx2 = -1;
398 ei_local->lasttx = -2;
399 }
400 }
401 else ei_local->txqueue++;
402
403 if (ei_local->tx1 && ei_local->tx2)
404 netif_stop_queue(dev);
405 else
406 netif_start_queue(dev);
407
408 /* Turn 8390 interrupts back on. */
409 ei_local->irqlock = 0;
410 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
411
412 spin_unlock(&ei_local->page_lock);
413 enable_irq_lockdep_irqrestore(dev->irq, &flags);
414 skb_tx_timestamp(skb);
415 dev_kfree_skb (skb);
416 dev->stats.tx_bytes += send_length;
417
418 return NETDEV_TX_OK;
419 }
420
421 /**
422 * ei_interrupt - handle the interrupts from an 8390
423 * @irq: interrupt number
424 * @dev_id: a pointer to the net_device
425 *
426 * Handle the ether interface interrupts. We pull packets from
427 * the 8390 via the card specific functions and fire them at the networking
428 * stack. We also handle transmit completions and wake the transmit path if
429 * necessary. We also update the counters and do other housekeeping as
430 * needed.
431 */
432
433 static irqreturn_t __ei_interrupt(int irq, void *dev_id)
434 {
435 struct net_device *dev = dev_id;
436 unsigned long e8390_base = dev->base_addr;
437 int interrupts, nr_serviced = 0;
438 struct ei_device *ei_local = netdev_priv(dev);
439
440 /*
441 * Protect the irq test too.
442 */
443
444 spin_lock(&ei_local->page_lock);
445
446 if (ei_local->irqlock)
447 {
448 /*
449 * This might just be an interrupt for a PCI device sharing
450 * this line
451 */
452 netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
453 ei_inb_p(e8390_base + EN0_ISR),
454 ei_inb_p(e8390_base + EN0_IMR));
455 spin_unlock(&ei_local->page_lock);
456 return IRQ_NONE;
457 }
458
459 /* Change to page 0 and read the intr status reg. */
460 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
461 if (ei_debug > 3)
462 netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
463 ei_inb_p(e8390_base + EN0_ISR));
464
465 /* !!Assumption!! -- we stay in page 0. Don't break this. */
466 while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
467 ++nr_serviced < MAX_SERVICE)
468 {
469 if (!netif_running(dev)) {
470 netdev_warn(dev, "interrupt from stopped card\n");
471 /* rmk - acknowledge the interrupts */
472 ei_outb_p(interrupts, e8390_base + EN0_ISR);
473 interrupts = 0;
474 break;
475 }
476 if (interrupts & ENISR_OVER)
477 ei_rx_overrun(dev);
478 else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
479 {
480 /* Got a good (?) packet. */
481 ei_receive(dev);
482 }
483 /* Push the next to-transmit packet through. */
484 if (interrupts & ENISR_TX)
485 ei_tx_intr(dev);
486 else if (interrupts & ENISR_TX_ERR)
487 ei_tx_err(dev);
488
489 if (interrupts & ENISR_COUNTERS)
490 {
491 dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
492 dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
493 dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
494 ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
495 }
496
497 /* Ignore any RDC interrupts that make it back to here. */
498 if (interrupts & ENISR_RDC)
499 {
500 ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
501 }
502
503 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
504 }
505
506 if (interrupts && ei_debug)
507 {
508 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
509 if (nr_serviced >= MAX_SERVICE)
510 {
511 /* 0xFF is valid for a card removal */
512 if(interrupts!=0xFF)
513 netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
514 interrupts);
515 ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
516 } else {
517 netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
518 ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
519 }
520 }
521 spin_unlock(&ei_local->page_lock);
522 return IRQ_RETVAL(nr_serviced > 0);
523 }
524
525 #ifdef CONFIG_NET_POLL_CONTROLLER
526 static void __ei_poll(struct net_device *dev)
527 {
528 disable_irq(dev->irq);
529 __ei_interrupt(dev->irq, dev);
530 enable_irq(dev->irq);
531 }
532 #endif
533
534 /**
535 * ei_tx_err - handle transmitter error
536 * @dev: network device which threw the exception
537 *
538 * A transmitter error has happened. Most likely excess collisions (which
539 * is a fairly normal condition). If the error is one where the Tx will
540 * have been aborted, we try and send another one right away, instead of
541 * letting the failed packet sit and collect dust in the Tx buffer. This
542 * is a much better solution as it avoids kernel based Tx timeouts, and
543 * an unnecessary card reset.
544 *
545 * Called with lock held.
546 */
547
548 static void ei_tx_err(struct net_device *dev)
549 {
550 unsigned long e8390_base = dev->base_addr;
551 /* ei_local is used on some platforms via the EI_SHIFT macro */
552 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
553 unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
554 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
555
556 #ifdef VERBOSE_ERROR_DUMP
557 netdev_dbg(dev, "transmitter error (%#2x):", txsr);
558 if (txsr & ENTSR_ABT)
559 pr_cont(" excess-collisions ");
560 if (txsr & ENTSR_ND)
561 pr_cont(" non-deferral ");
562 if (txsr & ENTSR_CRS)
563 pr_cont(" lost-carrier ");
564 if (txsr & ENTSR_FU)
565 pr_cont(" FIFO-underrun ");
566 if (txsr & ENTSR_CDH)
567 pr_cont(" lost-heartbeat ");
568 pr_cont("\n");
569 #endif
570
571 ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
572
573 if (tx_was_aborted)
574 ei_tx_intr(dev);
575 else
576 {
577 dev->stats.tx_errors++;
578 if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
579 if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
580 if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
581 }
582 }
583
584 /**
585 * ei_tx_intr - transmit interrupt handler
586 * @dev: network device for which tx intr is handled
587 *
588 * We have finished a transmit: check for errors and then trigger the next
589 * packet to be sent. Called with lock held.
590 */
591
592 static void ei_tx_intr(struct net_device *dev)
593 {
594 unsigned long e8390_base = dev->base_addr;
595 struct ei_device *ei_local = netdev_priv(dev);
596 int status = ei_inb(e8390_base + EN0_TSR);
597
598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
599
600 /*
601 * There are two Tx buffers, see which one finished, and trigger
602 * the send of another one if it exists.
603 */
604 ei_local->txqueue--;
605
606 if (ei_local->tx1 < 0)
607 {
608 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
609 pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
610 ei_local->name, ei_local->lasttx, ei_local->tx1);
611 ei_local->tx1 = 0;
612 if (ei_local->tx2 > 0)
613 {
614 ei_local->txing = 1;
615 NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
616 dev->trans_start = jiffies;
 617 			ei_local->tx2 = -1;
618 ei_local->lasttx = 2;
619 }
620 else ei_local->lasttx = 20, ei_local->txing = 0;
621 }
622 else if (ei_local->tx2 < 0)
623 {
624 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
625 pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
626 ei_local->name, ei_local->lasttx, ei_local->tx2);
627 ei_local->tx2 = 0;
628 if (ei_local->tx1 > 0)
629 {
630 ei_local->txing = 1;
631 NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
632 dev->trans_start = jiffies;
633 ei_local->tx1 = -1;
634 ei_local->lasttx = 1;
635 }
636 else
637 ei_local->lasttx = 10, ei_local->txing = 0;
638 }
639 // else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
640 // dev->name, ei_local->lasttx);
641
642 /* Minimize Tx latency: update the statistics after we restart TXing. */
643 if (status & ENTSR_COL)
644 dev->stats.collisions++;
645 if (status & ENTSR_PTX)
646 dev->stats.tx_packets++;
647 else
648 {
649 dev->stats.tx_errors++;
650 if (status & ENTSR_ABT)
651 {
652 dev->stats.tx_aborted_errors++;
653 dev->stats.collisions += 16;
654 }
655 if (status & ENTSR_CRS)
656 dev->stats.tx_carrier_errors++;
657 if (status & ENTSR_FU)
658 dev->stats.tx_fifo_errors++;
659 if (status & ENTSR_CDH)
660 dev->stats.tx_heartbeat_errors++;
661 if (status & ENTSR_OWC)
662 dev->stats.tx_window_errors++;
663 }
664 netif_wake_queue(dev);
665 }
666
667 /**
668 * ei_receive - receive some packets
669 * @dev: network device with which receive will be run
670 *
671 * We have a good packet(s), get it/them out of the buffers.
672 * Called with lock held.
673 */
674
675 static void ei_receive(struct net_device *dev)
676 {
677 unsigned long e8390_base = dev->base_addr;
678 struct ei_device *ei_local = netdev_priv(dev);
679 unsigned char rxing_page, this_frame, next_frame;
680 unsigned short current_offset;
681 int rx_pkt_count = 0;
682 struct e8390_pkt_hdr rx_frame;
683 int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
684
685 while (++rx_pkt_count < 10)
686 {
687 int pkt_len, pkt_stat;
688
689 /* Get the rx page (incoming packet pointer). */
690 ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
691 rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
692 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
693
694 /* Remove one frame from the ring. Boundary is always a page behind. */
695 this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
696 if (this_frame >= ei_local->stop_page)
697 this_frame = ei_local->rx_start_page;
698
699 /* Someday we'll omit the previous, iff we never get this message.
700 (There is at least one clone claimed to have a problem.)
701
702 Keep quiet if it looks like a card removal. One problem here
703 is that some clones crash in roughly the same way.
704 */
705 if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
706 netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
707 this_frame, ei_local->current_page);
708
709 if (this_frame == rxing_page) /* Read all the frames? */
710 break; /* Done for now */
711
712 current_offset = this_frame << 8;
713 ei_get_8390_hdr(dev, &rx_frame, this_frame);
714
715 pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
716 pkt_stat = rx_frame.status;
717
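		/* Estimate where the next frame should start: 4 byte header
		   plus data, in 256 byte pages; used only for the sanity
		   check against rx_frame.next below. */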
718 next_frame = this_frame + 1 + ((pkt_len+4)>>8);
719
720 /* Check for bogosity warned by 3c503 book: the status byte is never
721 written. This happened a lot during testing! This code should be
722 cleaned up someday. */
723 if (rx_frame.next != next_frame &&
724 rx_frame.next != next_frame + 1 &&
725 rx_frame.next != next_frame - num_rx_pages &&
726 rx_frame.next != next_frame + 1 - num_rx_pages) {
727 ei_local->current_page = rxing_page;
728 ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
729 dev->stats.rx_errors++;
730 continue;
731 }
732
733 if (pkt_len < 60 || pkt_len > 1518)
734 {
735 if (ei_debug)
736 netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
737 rx_frame.count, rx_frame.status,
738 rx_frame.next);
739 dev->stats.rx_errors++;
740 dev->stats.rx_length_errors++;
741 }
742 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
743 {
744 struct sk_buff *skb;
745
746 skb = dev_alloc_skb(pkt_len+2);
747 if (skb == NULL)
748 {
749 if (ei_debug > 1)
750 netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
751 pkt_len);
752 dev->stats.rx_dropped++;
753 break;
754 }
755 else
756 {
757 skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
758 skb_put(skb, pkt_len); /* Make room */
759 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
760 skb->protocol=eth_type_trans(skb,dev);
761 if (!skb_defer_rx_timestamp(skb))
762 netif_rx(skb);
763 dev->stats.rx_packets++;
764 dev->stats.rx_bytes += pkt_len;
765 if (pkt_stat & ENRSR_PHY)
766 dev->stats.multicast++;
767 }
768 }
769 else
770 {
771 if (ei_debug)
772 netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
773 rx_frame.status, rx_frame.next,
774 rx_frame.count);
775 dev->stats.rx_errors++;
776 /* NB: The NIC counts CRC, frame and missed errors. */
777 if (pkt_stat & ENRSR_FO)
778 dev->stats.rx_fifo_errors++;
779 }
780 next_frame = rx_frame.next;
781
782 /* This _should_ never happen: it's here for avoiding bad clones. */
783 if (next_frame >= ei_local->stop_page) {
784 netdev_notice(dev, "next frame inconsistency, %#2x\n",
785 next_frame);
786 next_frame = ei_local->rx_start_page;
787 }
788 ei_local->current_page = next_frame;
789 ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
790 }
791
792 /* We used to also ack ENISR_OVER here, but that would sometimes mask
793 a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
794 ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
795 }
796
797 /**
798 * ei_rx_overrun - handle receiver overrun
799 * @dev: network device which threw exception
800 *
801 * We have a receiver overrun: we have to kick the 8390 to get it started
802 * again. Problem is that you have to kick it exactly as NS prescribes in
803 * the updated datasheets, or "the NIC may act in an unpredictable manner."
804 * This includes causing "the NIC to defer indefinitely when it is stopped
805 * on a busy network." Ugh.
806 * Called with lock held. Don't call this with the interrupts off or your
807 * computer will hate you - it takes 10ms or so.
808 */
809
810 static void ei_rx_overrun(struct net_device *dev)
811 {
812 unsigned long e8390_base = dev->base_addr;
813 unsigned char was_txing, must_resend = 0;
814 /* ei_local is used on some platforms via the EI_SHIFT macro */
815 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
816
817 /*
818 * Record whether a Tx was in progress and then issue the
819 * stop command.
820 */
821 was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
822 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
823
824 if (ei_debug > 1)
825 netdev_dbg(dev, "Receiver overrun\n");
826 dev->stats.rx_over_errors++;
827
828 /*
829 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
830 * Early datasheets said to poll the reset bit, but now they say that
831 * it "is not a reliable indicator and subsequently should be ignored."
832 * We wait at least 10ms.
833 */
834
835 mdelay(10);
836
837 /*
838 * Reset RBCR[01] back to zero as per magic incantation.
839 */
840 ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
841 ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
842
843 /*
844 * See if any Tx was interrupted or not. According to NS, this
845 * step is vital, and skipping it will cause no end of havoc.
846 */
847
848 if (was_txing)
849 {
850 unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
851 if (!tx_completed)
852 must_resend = 1;
853 }
854
855 /*
856 * Have to enter loopback mode and then restart the NIC before
857 * you are allowed to slurp packets up off the ring.
858 */
859 ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
860 ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
861
862 /*
863 * Clear the Rx ring of all the debris, and ack the interrupt.
864 */
865 ei_receive(dev);
866 ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
867
868 /*
869 * Leave loopback mode, and resend any packet that got stopped.
870 */
871 ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
872 if (must_resend)
873 ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
874 }
875
876 /*
877 * Collect the stats. This is called unlocked and from several contexts.
878 */
879
880 static struct net_device_stats *__ei_get_stats(struct net_device *dev)
881 {
882 unsigned long ioaddr = dev->base_addr;
883 struct ei_device *ei_local = netdev_priv(dev);
884 unsigned long flags;
885
886 /* If the card is stopped, just return the present stats. */
887 if (!netif_running(dev))
888 return &dev->stats;
889
890 spin_lock_irqsave(&ei_local->page_lock,flags);
891 /* Read the counter registers, assuming we are in page 0. */
892 dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
893 dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
894 dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
895 spin_unlock_irqrestore(&ei_local->page_lock, flags);
896
897 return &dev->stats;
898 }
899
900 /*
901 * Form the 64 bit 8390 multicast table from the linked list of addresses
902 * associated with this dev structure.
903 */
904
905 static inline void make_mc_bits(u8 *bits, struct net_device *dev)
906 {
907 struct netdev_hw_addr *ha;
908
909 netdev_for_each_mc_addr(ha, dev) {
910 u32 crc = ether_crc(ETH_ALEN, ha->addr);
911 /*
912 * The 8390 uses the 6 most significant bits of the
913 * CRC to index the multicast table.
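		 * The top three bits (crc >> 29) select one of the eight
		 * filter bytes and the next three ((crc >> 26) & 7) select
		 * the bit within that byte.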
914 */
915 bits[crc>>29] |= (1<<((crc>>26)&7));
916 }
917 }
918
919 /**
920 * do_set_multicast_list - set/clear multicast filter
921 * @dev: net device for which multicast filter is adjusted
922 *
923 * Set or clear the multicast filter for this adaptor. May be called
924 * from a BH in 2.1.x. Must be called with lock held.
925 */
926
927 static void do_set_multicast_list(struct net_device *dev)
928 {
929 unsigned long e8390_base = dev->base_addr;
930 int i;
931 struct ei_device *ei_local = netdev_priv(dev);
932
933 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
934 {
935 memset(ei_local->mcfilter, 0, 8);
936 if (!netdev_mc_empty(dev))
937 make_mc_bits(ei_local->mcfilter, dev);
938 }
939 else
940 memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
941
942 /*
943 * DP8390 manuals don't specify any magic sequence for altering
944 * the multicast regs on an already running card. To be safe, we
945 * ensure multicast mode is off prior to loading up the new hash
946 * table. If this proves to be not enough, we can always resort
947 * to stopping the NIC, loading the table and then restarting.
948 *
949 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
950 * Elite16) appear to be write-only. The NS 8390 data sheet lists
951 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
952 * Ultra32 EISA) appears to have this bug fixed.
953 */
954
955 if (netif_running(dev))
956 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
957 ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
958 for(i = 0; i < 8; i++)
959 {
960 ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
961 #ifndef BUG_83C690
962 if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
 963 			netdev_err(dev, "Multicast filter read/write mismatch %d\n",
964 i);
965 #endif
966 }
967 ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
968
969 if(dev->flags&IFF_PROMISC)
970 ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
971 else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
972 ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
973 else
974 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
975 }
976
977 /*
978 * Called without lock held. This is invoked from user context and may
 979  *	be parallel to just about everything else. It's also fairly quick and
 980  *	not called too often. Must protect against both bh and irq users.
981 */
982
983 static void __ei_set_multicast_list(struct net_device *dev)
984 {
985 unsigned long flags;
986 struct ei_device *ei_local = netdev_priv(dev);
987
988 spin_lock_irqsave(&ei_local->page_lock, flags);
989 do_set_multicast_list(dev);
990 spin_unlock_irqrestore(&ei_local->page_lock, flags);
991 }
992
993 /**
994 * ethdev_setup - init rest of 8390 device struct
995 * @dev: network device structure to init
996 *
997 * Initialize the rest of the 8390 device structure. Do NOT __init
998 * this, as it is used by 8390 based modular drivers too.
999 */
1000
1001 static void ethdev_setup(struct net_device *dev)
1002 {
1003 struct ei_device *ei_local = netdev_priv(dev);
1004 if (ei_debug > 1)
 1005 		pr_info("%s", version);
1006
1007 ether_setup(dev);
1008
1009 spin_lock_init(&ei_local->page_lock);
1010 }
1011
1012 /**
1013 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
1014 * @size: extra bytes to allocate
1015 *
1016 * Allocate 8390-specific net_device.
1017 */
1018 static struct net_device *____alloc_ei_netdev(int size)
1019 {
1020 return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
1021 ethdev_setup);
1022 }
1023
1024
1025
1026
1027 /* This page of functions should be 8390 generic */
1028 /* Follow National Semi's recommendations for initializing the "NIC". */
1029
1030 /**
1031 * NS8390_init - initialize 8390 hardware
1032 * @dev: network device to initialize
1033 * @startp: boolean. non-zero value to initiate chip processing
1034 *
1035 * Must be called with lock held.
1036 */
1037
1038 static void __NS8390_init(struct net_device *dev, int startp)
1039 {
1040 unsigned long e8390_base = dev->base_addr;
1041 struct ei_device *ei_local = netdev_priv(dev);
1042 int i;
1043 int endcfg = ei_local->word16
1044 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1045 : 0x48;
1046
1047 if(sizeof(struct e8390_pkt_hdr)!=4)
1048 panic("8390.c: header struct mispacked\n");
1049 /* Follow National Semi's recommendations for initing the DP83902. */
1050 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1051 ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
1052 /* Clear the remote byte count registers. */
1053 ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
1054 ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
 1055 	/* Set to monitor and loopback mode -- this is vital! */
1056 ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1057 ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1058 /* Set the transmit page and receive ring. */
1059 ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1060 ei_local->tx1 = ei_local->tx2 = 0;
1061 ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
 1062 	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */
1063 ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
1064 ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1065 /* Clear the pending interrupts and mask. */
1066 ei_outb_p(0xFF, e8390_base + EN0_ISR);
1067 ei_outb_p(0x00, e8390_base + EN0_IMR);
1068
1069 /* Copy the station address into the DS8390 registers. */
1070
1071 ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1072 for(i = 0; i < 6; i++)
1073 {
1074 ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1075 if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
 1076 		netdev_err(dev, "Hw. address read/write mismatch %d\n", i);
1077 }
1078
1079 ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1080 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1081
1082 ei_local->tx1 = ei_local->tx2 = 0;
1083 ei_local->txing = 0;
1084
1085 if (startp)
1086 {
1087 ei_outb_p(0xff, e8390_base + EN0_ISR);
1088 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1089 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1090 ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1091 /* 3c503 TechMan says rxconfig only after the NIC is started. */
1092 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
1093 do_set_multicast_list(dev); /* (re)load the mcast table */
1094 }
1095 }
1096
1097 /* Trigger a transmit start, assuming the length is valid.
1098 Always called with the page lock held */
1099
1100 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1101 int start_page)
1102 {
1103 unsigned long e8390_base = dev->base_addr;
 1104 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
1105
1106 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1107
1108 if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
1109 {
1110 netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1111 return;
1112 }
1113 ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1114 ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1115 ei_outb_p(start_page, e8390_base + EN0_TPSR);
1116 ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1117 }