Merge branch 'neigh_cleanups'
[deliverable/linux.git] / drivers / net / hamradio / dmascc.c
CommitLineData
1da177e4
LT
1/*
2 * Driver for high-speed SCC boards (those with DMA support)
3 * Copyright (C) 1997-2000 Klaus Kudielka
4 *
5 * S5SCC/DMA support by Janko Koleznik S52HI
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22
23#include <linux/module.h>
1977f032 24#include <linux/bitops.h>
1da177e4
LT
25#include <linux/delay.h>
26#include <linux/errno.h>
27#include <linux/if_arp.h>
28#include <linux/in.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/ioport.h>
32#include <linux/kernel.h>
33#include <linux/mm.h>
34#include <linux/netdevice.h>
5a0e3ad6 35#include <linux/slab.h>
1da177e4
LT
36#include <linux/rtnetlink.h>
37#include <linux/sockios.h>
38#include <linux/workqueue.h>
60063497 39#include <linux/atomic.h>
1da177e4
LT
40#include <asm/dma.h>
41#include <asm/io.h>
42#include <asm/irq.h>
43#include <asm/uaccess.h>
44#include <net/ax25.h>
45#include "z8530.h"
46
47
48/* Number of buffers per channel */
49
50#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
51#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
52#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
53
54
55/* Cards supported */
56
57#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
58 0, 8, 1843200, 3686400 }
59#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
60 0, 8, 3686400, 7372800 }
61#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
62 0, 4, 6144000, 6144000 }
63#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
64 0, 8, 4915200, 9830400 }
65
66#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
67
68#define TMR_0_HZ 25600 /* Frequency of timer 0 */
69
70#define TYPE_PI 0
71#define TYPE_PI2 1
72#define TYPE_TWIN 2
73#define TYPE_S5 3
74#define NUM_TYPES 4
75
76#define MAX_NUM_DEVS 32
77
78
79/* SCC chips supported */
80
81#define Z8530 0
82#define Z85C30 1
83#define Z85230 2
84
85#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
86
87
88/* I/O registers */
89
90/* 8530 registers relative to card base */
91#define SCCB_CMD 0x00
92#define SCCB_DATA 0x01
93#define SCCA_CMD 0x02
94#define SCCA_DATA 0x03
95
96/* 8253/8254 registers relative to card base */
97#define TMR_CNT0 0x00
98#define TMR_CNT1 0x01
99#define TMR_CNT2 0x02
100#define TMR_CTRL 0x03
101
102/* Additional PI/PI2 registers relative to card base */
103#define PI_DREQ_MASK 0x04
104
105/* Additional PackeTwin registers relative to card base */
106#define TWIN_INT_REG 0x08
107#define TWIN_CLR_TMR1 0x09
108#define TWIN_CLR_TMR2 0x0a
109#define TWIN_SPARE_1 0x0b
110#define TWIN_DMA_CFG 0x08
111#define TWIN_SERIAL_CFG 0x09
112#define TWIN_DMA_CLR_FF 0x0a
113#define TWIN_SPARE_2 0x0b
114
115
116/* PackeTwin I/O register values */
117
118/* INT_REG */
119#define TWIN_SCC_MSK 0x01
120#define TWIN_TMR1_MSK 0x02
121#define TWIN_TMR2_MSK 0x04
122#define TWIN_INT_MSK 0x07
123
124/* SERIAL_CFG */
125#define TWIN_DTRA_ON 0x01
126#define TWIN_DTRB_ON 0x02
127#define TWIN_EXTCLKA 0x04
128#define TWIN_EXTCLKB 0x08
129#define TWIN_LOOPA_ON 0x10
130#define TWIN_LOOPB_ON 0x20
131#define TWIN_EI 0x80
132
133/* DMA_CFG */
134#define TWIN_DMA_HDX_T1 0x08
135#define TWIN_DMA_HDX_R1 0x0a
136#define TWIN_DMA_HDX_T3 0x14
137#define TWIN_DMA_HDX_R3 0x16
138#define TWIN_DMA_FDX_T3R1 0x1b
139#define TWIN_DMA_FDX_T1R3 0x1d
140
141
142/* Status values */
143
144#define IDLE 0
145#define TX_HEAD 1
146#define TX_DATA 2
147#define TX_PAUSE 3
148#define TX_TAIL 4
149#define RTS_OFF 5
150#define WAIT 6
151#define DCD_ON 7
152#define RX_ON 8
153#define DCD_OFF 9
154
155
156/* Ioctls */
157
158#define SIOCGSCCPARAM SIOCDEVPRIVATE
159#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
160
161
162/* Data types */
163
/* Adjustable per-channel parameters, read/written via the
   SIOCGSCCPARAM/SIOCSSCCPARAM ioctls (see scc_ioctl). */
struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};

/* Static description of one supported card type; instantiated once per
   type in the HARDWARE table (hw[]). */
struct scc_hardware {
	char *name;		/* human-readable board name */
	int io_region;		/* first candidate I/O base address */
	int io_delta;		/* spacing between successive card bases */
	int io_size;		/* size of the I/O region to request */
	int num_devs;		/* max number of boards of this type */
	int scc_offset;		/* SCC registers relative to card base */
	int tmr_offset;		/* 8253/8254 timer relative to card base */
	int tmr_hz;		/* timer input clock frequency */
	int pclk_hz;		/* SCC PCLK frequency */
};

/* Per-channel driver state; each board (scc_info) has two of these. */
struct scc_priv {
	int type;		/* TYPE_* board type */
	int chip;		/* Z8530 / Z85C30 / Z85230 */
	struct net_device *dev;
	struct scc_info *info;	/* owning board */

	int channel;		/* 0 = SCC channel A, 1 = channel B */
	int card_base, scc_cmd, scc_data;	/* I/O port addresses */
	int tmr_cnt, tmr_ctrl, tmr_mode;	/* per-channel timer ports/mode */
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;		/* write offset into the current RX buffer */
	struct work_struct rx_work;	/* bottom half delivering RX frames */
	int rx_head, rx_tail, rx_count;	/* RX ring bookkeeping */
	int rx_over;		/* overrun state of the frame in progress */
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;		/* read offset into the current TX buffer */
	int tx_head, tx_tail, tx_count;	/* TX ring bookkeeping */
	int state;		/* channel state machine (IDLE, TX_HEAD, ...) */
	unsigned long tx_start;	/* jiffies when the current TX started */
	int rr0;		/* last value read from SCC read register 0 */
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

/* Per-board state: one board hosts two channels/net devices. */
struct scc_info {
	int irq_used;		/* refcount of channels sharing the IRQ */
	int twin_serial_cfg;	/* shadow of the PackeTwin SERIAL_CFG register */
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;	/* next board in the global "first" list */
	spinlock_t register_lock;	/* Per device register lock */
};
229
230
231/* Function declarations */
232static int setup_adapter(int card_base, int type, int n) __init;
233
234static void write_scc(struct scc_priv *priv, int reg, int val);
235static void write_scc_data(struct scc_priv *priv, int val, int fast);
236static int read_scc(struct scc_priv *priv, int reg);
237static int read_scc_data(struct scc_priv *priv);
238
239static int scc_open(struct net_device *dev);
240static int scc_close(struct net_device *dev);
241static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
242static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
1da177e4
LT
243static int scc_set_mac_address(struct net_device *dev, void *sa);
244
245static inline void tx_on(struct scc_priv *priv);
246static inline void rx_on(struct scc_priv *priv);
247static inline void rx_off(struct scc_priv *priv);
248static void start_timer(struct scc_priv *priv, int t, int r15);
249static inline unsigned char random(void);
250
251static inline void z8530_isr(struct scc_info *info);
7d12e780 252static irqreturn_t scc_isr(int irq, void *dev_id);
1da177e4
LT
253static void rx_isr(struct scc_priv *priv);
254static void special_condition(struct scc_priv *priv, int rc);
7a87b6c2 255static void rx_bh(struct work_struct *);
1da177e4
LT
256static void tx_isr(struct scc_priv *priv);
257static void es_isr(struct scc_priv *priv);
258static void tm_isr(struct scc_priv *priv);
259
260
261/* Initialization variables */
262
263static int io[MAX_NUM_DEVS] __initdata = { 0, };
264
cd8d627a
RD
265/* Beware! hw[] is also used in dmascc_exit(). */
266static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
1da177e4
LT
267
268
269/* Global variables */
270
271static struct scc_info *first;
272static unsigned long rand;
273
274
275MODULE_AUTHOR("Klaus Kudielka");
276MODULE_DESCRIPTION("Driver for high-speed SCC boards");
8d3b33f6 277module_param_array(io, int, NULL, 0);
1da177e4
LT
278MODULE_LICENSE("GPL");
279
280static void __exit dmascc_exit(void)
281{
282 int i;
283 struct scc_info *info;
284
285 while (first) {
286 info = first;
287
288 /* Unregister devices */
289 for (i = 0; i < 2; i++)
290 unregister_netdev(info->dev[i]);
291
292 /* Reset board */
293 if (info->priv[0].type == TYPE_TWIN)
294 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
295 write_scc(&info->priv[0], R9, FHWRES);
296 release_region(info->dev[0]->base_addr,
297 hw[info->priv[0].type].io_size);
298
299 for (i = 0; i < 2; i++)
300 free_netdev(info->dev[i]);
301
302 /* Free memory */
303 first = info->next;
304 kfree(info);
305 }
306}
307
1da177e4
LT
/* Module init: autodetect supported boards.  For every card type we
   either use the user-supplied io[] bases or the type's default bases,
   then start the on-board 8253/8254 timer 1 and measure how long it
   takes to count down.  A board is considered present when the measured
   delay is ~10 jiffies (HZ/10 programmed below).  Returns 0 if at least
   one adapter was set up, -EIO otherwise. */
static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions: accept only
			   addresses that fall exactly on this type's grid
			   of valid bases. */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions; compute the timer port
		   addresses for each candidate base. */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop: poll for up to 13 jiffies, recording when
		   each candidate's timer 1 reaches zero (or wraps). */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements: a real timer counts down in about
		   HZ/10, i.e. delay of 9..11 jiffies. */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}
429
430module_init(dmascc_init);
431module_exit(dmascc_exit);
432
/* alloc_netdev() setup callback: fill in the AX.25 link-level defaults
   shared by both channels of every board. */
static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->neigh_priv_len = sizeof(struct ax25_neigh_priv);
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	/* Default broadcast and device addresses come from the AX.25 core. */
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}
444
52db6250
SH
/* Entry points the network core invokes on a dmascc device. */
static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_do_ioctl = scc_ioctl,
	.ndo_set_mac_address = scc_set_mac_address,
	.ndo_neigh_construct = ax25_neigh_construct,
};
453
1da177e4
LT
/* Probe and register one adapter found at card_base.  Identifies the
   SCC chip variant (Z8530/Z85C30/Z85230), auto-detects the IRQ using
   the on-board timer, initializes both channels' state and registers
   them as net devices.  Returns 0 on success, -1 on failure (all
   acquired resources are rolled back via the out* labels). */
static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info)
		goto out;

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	/* Minimal channel-A context so the register accessors work. */
	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer: a one-shot countdown of 1 raises the probe IRQ. */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ (short busy-wait; init-time only) */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}

	/* Set up data structures for both channels */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}

	/* Link the new board onto the global list. */
	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	/* Reset board before freeing */
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return -1;
}
619
620
621/* Driver functions */
622
/* Write val to SCC register reg (reg 0 = direct command/data write).
   PI/PI2 boards share the command port with the DMA request line, so
   the access is bracketed by PI_DREQ_MASK writes under the register
   lock; S5 and PackeTwin can write directly. */
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);	/* select register first */
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		/* PI/PI2: mask DREQ around the access */
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}
648
649
/* Write one byte to the SCC data port.  "fast" skips the PI/PI2
   DREQ-mask/lock dance for callers that already hold the lock and have
   masked DREQ themselves. */
static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			/* PI/PI2: mask DREQ around the access */
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}
673
674
/* Read SCC register reg (reg 0 = direct status read).  Same per-board
   access conventions as write_scc. */
static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);	/* select register first */
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		/* PI/PI2: mask DREQ around the access */
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
699
700
/* Read one byte from the SCC data port; PI/PI2 boards need the DREQ
   mask/lock bracket as in read_scc. */
static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		/* PI/PI2: mask DREQ around the access */
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
719
720
/* net_device open: acquire the (board-shared) IRQ and optional DMA
   channel, reset the ring state and program the SCC channel for SDLC
   operation.  Returns 0 or -EAGAIN when IRQ/DMA cannot be obtained. */
static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			/* Roll back the IRQ refcount on failure */
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}
861
862
/* net_device stop: quiesce the channel and release the DMA channel and
   (when the sibling channel is also closed) the shared IRQ. */
static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	/* IRQ is shared by both channels; free only on last close. */
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}
890
891
892static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
893{
f4bdd264 894 struct scc_priv *priv = dev->ml_priv;
1da177e4
LT
895
896 switch (cmd) {
897 case SIOCGSCCPARAM:
898 if (copy_to_user
899 (ifr->ifr_data, &priv->param,
900 sizeof(struct scc_param)))
901 return -EFAULT;
902 return 0;
903 case SIOCSSCCPARAM:
904 if (!capable(CAP_NET_ADMIN))
905 return -EPERM;
906 if (netif_running(dev))
907 return -EAGAIN;
908 if (copy_from_user
909 (&priv->param, ifr->ifr_data,
910 sizeof(struct scc_param)))
911 return -EFAULT;
912 return 0;
913 default:
914 return -EINVAL;
915 }
916}
917
918
/* net_device start_xmit: queue one frame into the TX ring and, if the
   channel is idle, kick off the transmit state machine. */
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	unsigned long flags;
	int i;

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer.  The first skb byte is skipped;
	   presumably the AX.25/KISS command byte — TODO confirm against
	   the AX.25 layer's frame layout. */
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
962
963
1da177e4
LT
/* net_device set_mac_address: copy the new (AX.25) hardware address.
   NOTE(review): the copy is not serialized against concurrent readers
   of dev->dev_addr — confirm whether the caller holds RTNL here. */
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}
970
971
/* Start transmitting the frame at the TX ring tail, either via the DMA
   controller (priming the FIFO with the first byte(s)) or purely
   interrupt-driven. */
static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		/* Z85230's deeper FIFO lets us prime 3 bytes by hand;
		   older chips get 1. */
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		/* No DMA: enable TX interrupts and push the first bytes
		   straight from the TX ISR. */
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}
1015
1016
/* Enable the receiver: drain the RX FIFO, arm DMA (if configured) for
   the current RX ring head, and program the interrupt sources. */
static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
1053
1054
/* Disable the receiver and tear down its DREQ/interrupt/DMA setup. */
static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}
1068
1069
/* Arm the channel's 8253/8254 timer for t ticks (units of 1/TMR_0_HZ).
   t == 0 invokes the timer ISR immediately; t < 0 leaves the timer
   stopped.  Non-PackeTwin boards signal expiry via the SCC CTS pin, so
   CTSIE is enabled in R15 alongside the caller-supplied bits. */
static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		/* Load LSB then MSB of the countdown value. */
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
	}
}
1084
1085
1086static inline unsigned char random(void)
1087{
1088 /* See "Numerical Recipes in C", second edition, p. 284 */
1089 rand = rand * 1664525L + 1013904223L;
1090 return (unsigned char) (rand >> 24);
1091}
1092
1093static inline void z8530_isr(struct scc_info *info)
1094{
1095 int is, i = 100;
1096
1097 while ((is = read_scc(&info->priv[0], R3)) && i--) {
1098 if (is & CHARxIP) {
1099 rx_isr(&info->priv[0]);
1100 } else if (is & CHATxIP) {
1101 tx_isr(&info->priv[0]);
1102 } else if (is & CHAEXT) {
1103 es_isr(&info->priv[0]);
1104 } else if (is & CHBRxIP) {
1105 rx_isr(&info->priv[1]);
1106 } else if (is & CHBTxIP) {
1107 tx_isr(&info->priv[1]);
1108 } else {
1109 es_isr(&info->priv[1]);
1110 }
1111 write_scc(&info->priv[0], R0, RES_H_IUS);
1112 i++;
1113 }
1114 if (i < 0) {
1115 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1116 is);
1117 }
1118 /* Ok, no interrupts pending from this 8530. The INT line should
1119 be inactive now. */
1120}
1121
1122
7d12e780 1123static irqreturn_t scc_isr(int irq, void *dev_id)
1da177e4
LT
1124{
1125 struct scc_info *info = dev_id;
1126
1127 spin_lock(info->priv[0].register_lock);
1128 /* At this point interrupts are enabled, and the interrupt under service
1129 is already acknowledged, but masked off.
1130
1131 Interrupt processing: We loop until we know that the IRQ line is
1132 low. If another positive edge occurs afterwards during the ISR,
1133 another interrupt will be triggered by the interrupt controller
1134 as soon as the IRQ level is enabled again (see asm/irq.h).
1135
1136 Bottom-half handlers will be processed after scc_isr(). This is
1137 important, since we only have small ringbuffers and want new data
1138 to be fetched/delivered immediately. */
1139
1140 if (info->priv[0].type == TYPE_TWIN) {
1141 int is, card_base = info->priv[0].card_base;
1142 while ((is = ~inb(card_base + TWIN_INT_REG)) &
1143 TWIN_INT_MSK) {
1144 if (is & TWIN_SCC_MSK) {
1145 z8530_isr(info);
1146 } else if (is & TWIN_TMR1_MSK) {
1147 inb(card_base + TWIN_CLR_TMR1);
1148 tm_isr(&info->priv[0]);
1149 } else {
1150 inb(card_base + TWIN_CLR_TMR2);
1151 tm_isr(&info->priv[1]);
1152 }
1153 }
1154 } else
1155 z8530_isr(info);
1156 spin_unlock(info->priv[0].register_lock);
1157 return IRQ_HANDLED;
1158}
1159
1160
1161static void rx_isr(struct scc_priv *priv)
1162{
1163 if (priv->param.dma >= 0) {
1164 /* Check special condition and perform error reset. See 2.4.7.5. */
1165 special_condition(priv, read_scc(priv, R1));
1166 write_scc(priv, R0, ERR_RES);
1167 } else {
1168 /* Check special condition for each character. Error reset not necessary.
1169 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1170 int rc;
1171 while (read_scc(priv, R0) & Rx_CH_AV) {
1172 rc = read_scc(priv, R1);
1173 if (priv->rx_ptr < BUF_SIZE)
1174 priv->rx_buf[priv->rx_head][priv->
1175 rx_ptr++] =
1176 read_scc_data(priv);
1177 else {
1178 priv->rx_over = 2;
1179 read_scc_data(priv);
1180 }
1181 special_condition(priv, rc);
1182 }
1183 }
1184}
1185
1186
1187static void special_condition(struct scc_priv *priv, int rc)
1188{
1189 int cb;
1190 unsigned long flags;
1191
1192 /* See Figure 2-15. Only overrun and EOF need to be checked. */
1193
1194 if (rc & Rx_OVR) {
1195 /* Receiver overrun */
1196 priv->rx_over = 1;
1197 if (priv->param.dma < 0)
1198 write_scc(priv, R0, ERR_RES);
1199 } else if (rc & END_FR) {
1200 /* End of frame. Get byte count */
1201 if (priv->param.dma >= 0) {
1202 flags = claim_dma_lock();
1203 cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
1204 2;
1205 release_dma_lock(flags);
1206 } else {
1207 cb = priv->rx_ptr - 2;
1208 }
1209 if (priv->rx_over) {
1210 /* We had an overrun */
13c0582d 1211 priv->dev->stats.rx_errors++;
1da177e4 1212 if (priv->rx_over == 2)
13c0582d 1213 priv->dev->stats.rx_length_errors++;
1da177e4 1214 else
13c0582d 1215 priv->dev->stats.rx_fifo_errors++;
1da177e4
LT
1216 priv->rx_over = 0;
1217 } else if (rc & CRC_ERR) {
1218 /* Count invalid CRC only if packet length >= minimum */
1219 if (cb >= 15) {
13c0582d
SH
1220 priv->dev->stats.rx_errors++;
1221 priv->dev->stats.rx_crc_errors++;
1da177e4
LT
1222 }
1223 } else {
1224 if (cb >= 15) {
1225 if (priv->rx_count < NUM_RX_BUF - 1) {
1226 /* Put good frame in FIFO */
1227 priv->rx_len[priv->rx_head] = cb;
1228 priv->rx_head =
1229 (priv->rx_head +
1230 1) % NUM_RX_BUF;
1231 priv->rx_count++;
1232 schedule_work(&priv->rx_work);
1233 } else {
13c0582d
SH
1234 priv->dev->stats.rx_errors++;
1235 priv->dev->stats.rx_over_errors++;
1da177e4
LT
1236 }
1237 }
1238 }
1239 /* Get ready for new frame */
1240 if (priv->param.dma >= 0) {
1241 flags = claim_dma_lock();
1242 set_dma_addr(priv->param.dma,
1243 (int) priv->rx_buf[priv->rx_head]);
1244 set_dma_count(priv->param.dma, BUF_SIZE);
1245 release_dma_lock(flags);
1246 } else {
1247 priv->rx_ptr = 0;
1248 }
1249 }
1250}
1251
1252
7a87b6c2 1253static void rx_bh(struct work_struct *ugli_api)
1da177e4 1254{
7a87b6c2 1255 struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
1da177e4
LT
1256 int i = priv->rx_tail;
1257 int cb;
1258 unsigned long flags;
1259 struct sk_buff *skb;
1260 unsigned char *data;
1261
1262 spin_lock_irqsave(&priv->ring_lock, flags);
1263 while (priv->rx_count) {
1264 spin_unlock_irqrestore(&priv->ring_lock, flags);
1265 cb = priv->rx_len[i];
1266 /* Allocate buffer */
1267 skb = dev_alloc_skb(cb + 1);
1268 if (skb == NULL) {
1269 /* Drop packet */
13c0582d 1270 priv->dev->stats.rx_dropped++;
1da177e4
LT
1271 } else {
1272 /* Fill buffer */
1273 data = skb_put(skb, cb + 1);
1274 data[0] = 0;
1275 memcpy(&data[1], priv->rx_buf[i], cb);
56cb5156 1276 skb->protocol = ax25_type_trans(skb, priv->dev);
1da177e4 1277 netif_rx(skb);
13c0582d
SH
1278 priv->dev->stats.rx_packets++;
1279 priv->dev->stats.rx_bytes += cb;
1da177e4
LT
1280 }
1281 spin_lock_irqsave(&priv->ring_lock, flags);
1282 /* Move tail */
1283 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1284 priv->rx_count--;
1285 }
1286 spin_unlock_irqrestore(&priv->ring_lock, flags);
1287}
1288
1289
1290static void tx_isr(struct scc_priv *priv)
1291{
1292 int i = priv->tx_tail, p = priv->tx_ptr;
1293
1294 /* Suspend TX interrupts if we don't want to send anything.
1295 See Figure 2-22. */
1296 if (p == priv->tx_len[i]) {
1297 write_scc(priv, R0, RES_Tx_P);
1298 return;
1299 }
1300
1301 /* Write characters */
1302 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1303 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1304 }
1305
1306 /* Reset EOM latch of Z8530 */
1307 if (!priv->tx_ptr && p && priv->chip == Z8530)
1308 write_scc(priv, R0, RES_EOM_L);
1309
1310 priv->tx_ptr = p;
1311}
1312
1313
1314static void es_isr(struct scc_priv *priv)
1315{
1316 int i, rr0, drr0, res;
1317 unsigned long flags;
1318
1319 /* Read status, reset interrupt bit (open latches) */
1320 rr0 = read_scc(priv, R0);
1321 write_scc(priv, R0, RES_EXT_INT);
1322 drr0 = priv->rr0 ^ rr0;
1323 priv->rr0 = rr0;
1324
1325 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1326 it might have already been cleared again by AUTOEOM. */
1327 if (priv->state == TX_DATA) {
1328 /* Get remaining bytes */
1329 i = priv->tx_tail;
1330 if (priv->param.dma >= 0) {
1331 disable_dma(priv->param.dma);
1332 flags = claim_dma_lock();
1333 res = get_dma_residue(priv->param.dma);
1334 release_dma_lock(flags);
1335 } else {
1336 res = priv->tx_len[i] - priv->tx_ptr;
1337 priv->tx_ptr = 0;
1338 }
1339 /* Disable DREQ / TX interrupt */
1340 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1341 outb(0, priv->card_base + TWIN_DMA_CFG);
1342 else
1343 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1344 if (res) {
1345 /* Update packet statistics */
13c0582d
SH
1346 priv->dev->stats.tx_errors++;
1347 priv->dev->stats.tx_fifo_errors++;
1da177e4
LT
1348 /* Other underrun interrupts may already be waiting */
1349 write_scc(priv, R0, RES_EXT_INT);
1350 write_scc(priv, R0, RES_EXT_INT);
1351 } else {
1352 /* Update packet statistics */
13c0582d
SH
1353 priv->dev->stats.tx_packets++;
1354 priv->dev->stats.tx_bytes += priv->tx_len[i];
1da177e4
LT
1355 /* Remove frame from FIFO */
1356 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1357 priv->tx_count--;
1358 /* Inform upper layers */
1359 netif_wake_queue(priv->dev);
1360 }
1361 /* Switch state */
1362 write_scc(priv, R15, 0);
1363 if (priv->tx_count &&
1364 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1365 priv->state = TX_PAUSE;
1366 start_timer(priv, priv->param.txpause, 0);
1367 } else {
1368 priv->state = TX_TAIL;
1369 start_timer(priv, priv->param.txtail, 0);
1370 }
1371 }
1372
1373 /* DCD transition */
1374 if (drr0 & DCD) {
1375 if (rr0 & DCD) {
1376 switch (priv->state) {
1377 case IDLE:
1378 case WAIT:
1379 priv->state = DCD_ON;
1380 write_scc(priv, R15, 0);
1381 start_timer(priv, priv->param.dcdon, 0);
1382 }
1383 } else {
1384 switch (priv->state) {
1385 case RX_ON:
1386 rx_off(priv);
1387 priv->state = DCD_OFF;
1388 write_scc(priv, R15, 0);
1389 start_timer(priv, priv->param.dcdoff, 0);
1390 }
1391 }
1392 }
1393
1394 /* CTS transition */
1395 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
1396 tm_isr(priv);
1397
1398}
1399
1400
1401static void tm_isr(struct scc_priv *priv)
1402{
1403 switch (priv->state) {
1404 case TX_HEAD:
1405 case TX_PAUSE:
1406 tx_on(priv);
1407 priv->state = TX_DATA;
1408 break;
1409 case TX_TAIL:
1410 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1411 priv->state = RTS_OFF;
1412 if (priv->type != TYPE_TWIN)
1413 write_scc(priv, R15, 0);
1414 start_timer(priv, priv->param.rtsoff, 0);
1415 break;
1416 case RTS_OFF:
1417 write_scc(priv, R15, DCDIE);
1418 priv->rr0 = read_scc(priv, R0);
1419 if (priv->rr0 & DCD) {
13c0582d 1420 priv->dev->stats.collisions++;
1da177e4
LT
1421 rx_on(priv);
1422 priv->state = RX_ON;
1423 } else {
1424 priv->state = WAIT;
1425 start_timer(priv, priv->param.waittime, DCDIE);
1426 }
1427 break;
1428 case WAIT:
1429 if (priv->tx_count) {
1430 priv->state = TX_HEAD;
1431 priv->tx_start = jiffies;
1432 write_scc(priv, R5,
1433 TxCRC_ENAB | RTS | TxENAB | Tx8);
1434 write_scc(priv, R15, 0);
1435 start_timer(priv, priv->param.txdelay, 0);
1436 } else {
1437 priv->state = IDLE;
1438 if (priv->type != TYPE_TWIN)
1439 write_scc(priv, R15, DCDIE);
1440 }
1441 break;
1442 case DCD_ON:
1443 case DCD_OFF:
1444 write_scc(priv, R15, DCDIE);
1445 priv->rr0 = read_scc(priv, R0);
1446 if (priv->rr0 & DCD) {
1447 rx_on(priv);
1448 priv->state = RX_ON;
1449 } else {
1450 priv->state = WAIT;
1451 start_timer(priv,
1452 random() / priv->param.persist *
1453 priv->param.slottime, DCDIE);
1454 }
1455 break;
1456 }
1457}
This page took 0.951966 seconds and 5 git commands to generate.