/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF 2		/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF 6		/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE 1576		/* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported */

#define HW_PI   { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
		  0, 8, 1843200, 3686400 }
#define HW_PI2  { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
		  0, 8, 3686400, 7372800 }
#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
		  0, 4, 6144000, 6144000 }
#define HW_S5   { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
		  0, 8, 4915200, 9830400 }

#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ 25600		/* Frequency of timer 0 */

#define TYPE_PI 0
#define TYPE_PI2 1
#define TYPE_TWIN 2
#define TYPE_S5 3
#define NUM_TYPES 4

#define MAX_NUM_DEVS 32


/* SCC chips supported */

#define Z8530 0
#define Z85C30 1
#define Z85230 2

#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD 0x00
#define SCCB_DATA 0x01
#define SCCA_CMD 0x02
#define SCCA_DATA 0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0 0x00
#define TMR_CNT1 0x01
#define TMR_CNT2 0x02
#define TMR_CTRL 0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK 0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG 0x08
#define TWIN_CLR_TMR1 0x09
#define TWIN_CLR_TMR2 0x0a
#define TWIN_SPARE_1 0x0b
#define TWIN_DMA_CFG 0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2 0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK 0x01
#define TWIN_TMR1_MSK 0x02
#define TWIN_TMR2_MSK 0x04
#define TWIN_INT_MSK 0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON 0x01
#define TWIN_DTRB_ON 0x02
#define TWIN_EXTCLKA 0x04
#define TWIN_EXTCLKB 0x08
#define TWIN_LOOPA_ON 0x10
#define TWIN_LOOPB_ON 0x20
#define TWIN_EI 0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1 0x08
#define TWIN_DMA_HDX_R1 0x0a
#define TWIN_DMA_HDX_T3 0x14
#define TWIN_DMA_HDX_R3 0x16
#define TWIN_DMA_FDX_T3R1 0x1b
#define TWIN_DMA_FDX_T1R3 0x1d


/* Status values */

#define IDLE 0
#define TX_HEAD 1
#define TX_DATA 2
#define TX_PAUSE 3
#define TX_TAIL 4
#define RTS_OFF 5
#define WAIT 6
#define DCD_ON 7
#define RX_ON 8
#define DCD_OFF 9


/* Ioctls */

#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)


/* Data types */

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};
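
/*
 * Illustrative sketch only, kept inside a comment so it is not compiled
 * into the driver: a userspace configuration tool would read and update
 * the per-channel parameters above through the two private ioctls
 * SIOCGSCCPARAM / SIOCSSCCPARAM. The interface name "dmascc0" and the
 * helper name are assumptions made purely for this example; fd is any
 * socket, e.g. socket(AF_INET, SOCK_DGRAM, 0), and txdelay_ticks is in
 * units of 1/TMR_0_HZ. Note that SIOCSSCCPARAM additionally requires
 * CAP_NET_ADMIN and an interface that is down (see scc_ioctl()).
 *
 *	int set_txdelay(int fd, int txdelay_ticks)
 *	{
 *		struct ifreq ifr;
 *		struct scc_param param;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *) &param;
 *		if (ioctl(fd, SIOCGSCCPARAM, &ifr) < 0)
 *			return -1;
 *		param.txdelay = txdelay_ticks;
 *		return ioctl(fd, SIOCSSCCPARAM, &ifr);
 *	}
 */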

struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;
	struct net_device_stats stats;
	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};


/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(void *arg);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in cleanup_module(). */
static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
static char ax25_broadcast[7] __initdata =
	{ 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1,
	  '0' << 1 };
static char ax25_test[7] __initdata =
	{ 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1,
	  '1' << 1 };


/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_array(io, int, NULL, 0);
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}

static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs
				    && hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0
					    || t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, ax25_broadcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
}

static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Allocate memory */
	info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out;
	}

	/* Initialize what is necessary for write_scc and write_scc_data */
	memset(info, 0, sizeof(struct scc_info));

	info->dev[0] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh, priv);
		dev->priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		SET_MODULE_OWNER(dev);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->open = scc_open;
		dev->stop = scc_close;
		dev->do_ioctl = scc_ioctl;
		dev->hard_start_xmit = scc_send_packet;
		dev->get_stats = scc_get_stats;
		dev->hard_header = ax25_hard_header;
		dev->rebuild_header = ax25_rebuild_header;
		dev->set_mac_address = scc_set_mac_address;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}


	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

out4:
	unregister_netdev(info->dev[0]);
out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
out2:
	free_netdev(info->dev[0]);
out1:
	kfree(info);
out:
	return -1;
}



/* Driver functions */

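/*
 * A note on the default (PI/PI2) cases in the accessors below: each direct
 * SCC access is bracketed by outb(0)/outb(1) to PI_DREQ_MASK under the
 * shared register lock, i.e. the card's DMA request line is masked while
 * the CPU touches the 8530 and re-enabled afterwards. This is an inference
 * drawn from the code itself, not a statement taken from the PI/PI2
 * documentation.
 */
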
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}


static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}


static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		     This bit should be ignored in DMA mode (according to the
		     documentation), but actually isn't. The receiver doesn't work if
		     it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		     a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		        compatibility).
		     b) If cleared, DMA requests may follow each other very quickly,
		        filling up the TX FIFO.
		        Advantage: TX works even in case of high bus latency.
		        Disadvantage: Edge-triggered DMA request circuitry may miss
		                      a request. No more data is delivered, resulting
		                      in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}


static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}


static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct scc_priv *priv = dev->priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user
		    (ifr->ifr_data, &priv->param,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user
		    (&priv->param, ifr->ifr_data,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}


static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	unsigned long flags;
	int i;

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
	i = priv->tx_head;
	memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return 0;
}


static struct net_device_stats *scc_get_stats(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;

	return &priv->stats;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}


static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}


static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}


static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}


static void start_timer(struct scc_priv *priv, int t, int r15)
{
	unsigned long flags;

	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		save_flags(flags);
		cli();
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
		restore_flags(flags);
	}
}


static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p. 284 */
	rand = rand * 1664525L + 1013904223L;
	return (unsigned char) (rand >> 24);
}
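
/*
 * The pseudo-random byte above feeds the p-persistence back-off in tm_isr():
 * after a DCD transition the driver waits random() / persist * slottime
 * timer ticks before probing the channel again. As a worked example (values
 * assumed purely for illustration), persist = 64 and slottime = 10 give a
 * delay of (0..255) / 64 * 10, i.e. 0, 10, 20 or 30 ticks of TMR_0_HZ; the
 * default persist = 256 set in setup_adapter() yields no extra delay at all.
 */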

static inline void z8530_isr(struct scc_info *info)
{
	int is, i = 100;

	while ((is = read_scc(&info->priv[0], R3)) && i--) {
		if (is & CHARxIP) {
			rx_isr(&info->priv[0]);
		} else if (is & CHATxIP) {
			tx_isr(&info->priv[0]);
		} else if (is & CHAEXT) {
			es_isr(&info->priv[0]);
		} else if (is & CHBRxIP) {
			rx_isr(&info->priv[1]);
		} else if (is & CHBTxIP) {
			tx_isr(&info->priv[1]);
		} else {
			es_isr(&info->priv[1]);
		}
		write_scc(&info->priv[0], R0, RES_H_IUS);
		i++;
	}
	if (i < 0) {
		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
		       is);
	}
	/* Ok, no interrupts pending from this 8530. The INT line should
	   be inactive now. */
}


static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}


static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
				    read_scc_data(priv);
			else {
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}


static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
			    2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun */
			priv->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->stats.rx_length_errors++;
			else
				priv->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->stats.rx_errors++;
				priv->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head +
					     1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					priv->stats.rx_errors++;
					priv->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}


static void rx_bh(void *arg)
{
	struct scc_priv *priv = arg;
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			/* Drop packet */
			priv->stats.rx_dropped++;
		} else {
			/* Fill buffer */
			data = skb_put(skb, cb + 1);
			data[0] = 0;
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->last_rx = jiffies;
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		/* Move tail */
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}


static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything.
	   See Figure 2-22. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}


static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Update packet statistics */
			priv->stats.tx_errors++;
			priv->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Update packet statistics */
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);

}


static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			priv->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}