1 /*********************************************************************
2 *
3 * Filename: w83977af_ir.c
4 * Version: 1.0
5 * Description: FIR driver for the Winbond W83977AF Super I/O chip
6 * Status: Experimental.
7 * Author: Paul VanderSpek
8 * Created at: Wed Nov 4 11:46:16 1998
9 * Modified at: Fri Jan 28 12:10:59 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998-1999 Rebel.com
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
21 * warranty for any of this software. This material is provided "AS-IS"
22 * and at no charge.
23 *
   24 * If you find bugs in this file, it's very likely that the same bug
25 * will also be in pc87108.c since the implementations are quite
26 * similar.
27 *
   28 * Notice that all functions that need to access the chip in _any_
   29 * way must save the BSR register on entry and restore it on exit.
30 * It is _very_ important to follow this policy!
31 *
32 * __u8 bank;
33 *
34 * bank = inb( iobase+BSR);
35 *
36 * do_your_stuff_here();
37 *
38 * outb( bank, iobase+BSR);
39 *
40 ********************************************************************/
41
42 #include <linux/module.h>
43 #include <linux/kernel.h>
44 #include <linux/types.h>
45 #include <linux/skbuff.h>
46 #include <linux/netdevice.h>
47 #include <linux/ioport.h>
48 #include <linux/delay.h>
49 #include <linux/init.h>
50 #include <linux/interrupt.h>
51 #include <linux/rtnetlink.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/gfp.h>
54
55 #include <asm/io.h>
56 #include <asm/dma.h>
57 #include <asm/byteorder.h>
58
59 #include <net/irda/irda.h>
60 #include <net/irda/wrapper.h>
61 #include <net/irda/irda_device.h>
62 #include "w83977af.h"
63 #include "w83977af_ir.h"
64
65 #define CONFIG_USE_W977_PNP /* Currently needed */
66 #define PIO_MAX_SPEED 115200
67
68 static char *driver_name = "w83977af_ir";
69 static int qos_mtt_bits = 0x07; /* 1 ms or more */
70
71 #define CHIP_IO_EXTENT 8
72
73 static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
74 #ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
75 static unsigned int irq[] = { 6, 0, 0, 0 };
76 #else
77 static unsigned int irq[] = { 11, 0, 0, 0 };
78 #endif
79 static unsigned int dma[] = { 1, 0, 0, 0 };
80 static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
81 static unsigned int efio = W977_EFIO_BASE;
82
83 static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
84
85 /* Some prototypes */
86 static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
87 unsigned int dma);
88 static int w83977af_close(struct w83977af_ir *self);
89 static int w83977af_probe(int iobase, int irq, int dma);
90 static int w83977af_dma_receive(struct w83977af_ir *self);
91 static int w83977af_dma_receive_complete(struct w83977af_ir *self);
92 static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
93 struct net_device *dev);
94 static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
95 static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
96 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
97 static int w83977af_is_receiving(struct w83977af_ir *self);
98
99 static int w83977af_net_open(struct net_device *dev);
100 static int w83977af_net_close(struct net_device *dev);
101 static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
102
103 /*
104 * Function w83977af_init ()
105 *
106 * Initialize chip. Just try to find out how many chips we are dealing with
107 * and where they are
108 */
109 static int __init w83977af_init(void)
110 {
111 int i;
112
113 IRDA_DEBUG(0, "%s()\n", __func__ );
114
115 for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
116 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
117 return 0;
118 }
119 return -ENODEV;
120 }
121
122 /*
123 * Function w83977af_cleanup ()
124 *
125 * Close all configured chips
126 *
127 */
128 static void __exit w83977af_cleanup(void)
129 {
130 int i;
131
132 IRDA_DEBUG(4, "%s()\n", __func__ );
133
134 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
135 if (dev_self[i])
136 w83977af_close(dev_self[i]);
137 }
138 }
139
140 static const struct net_device_ops w83977_netdev_ops = {
141 .ndo_open = w83977af_net_open,
142 .ndo_stop = w83977af_net_close,
143 .ndo_start_xmit = w83977af_hard_xmit,
144 .ndo_do_ioctl = w83977af_net_ioctl,
145 };
146
147 /*
148 * Function w83977af_open (iobase, irq)
149 *
150 * Open driver instance
151 *
152 */
153 static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
154 unsigned int dma)
155 {
156 struct net_device *dev;
157 struct w83977af_ir *self;
158 int err;
159
160 IRDA_DEBUG(0, "%s()\n", __func__ );
161
162 /* Lock the port that we need */
163 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
164 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
165 __func__ , iobase);
166 return -ENODEV;
167 }
168
169 if (w83977af_probe(iobase, irq, dma) == -1) {
170 err = -1;
171 goto err_out;
172 }
173 /*
174 * Allocate new instance of the driver
175 */
176 dev = alloc_irdadev(sizeof(struct w83977af_ir));
177 if (dev == NULL) {
178 printk( KERN_ERR "IrDA: Can't allocate memory for "
179 "IrDA control block!\n");
180 err = -ENOMEM;
181 goto err_out;
182 }
183
184 self = netdev_priv(dev);
185 spin_lock_init(&self->lock);
186
187
188 /* Initialize IO */
189 self->io.fir_base = iobase;
190 self->io.irq = irq;
191 self->io.fir_ext = CHIP_IO_EXTENT;
192 self->io.dma = dma;
193 self->io.fifo_size = 32;
194
195 /* Initialize QoS for this device */
196 irda_init_max_qos_capabilies(&self->qos);
197
  198 	/* The only value we must override is the baud rate */
199
  200 	/* FIXME: The HP HSDL-1100 does not support 1152000! */
201 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
202 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
203
  204 	/* The HP HSDL-1100 needs 1 ms according to the specs */
205 self->qos.min_turn_time.bits = qos_mtt_bits;
206 irda_qos_bits_to_value(&self->qos);
207
208 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
209 self->rx_buff.truesize = 14384;
210 self->tx_buff.truesize = 4000;
211
212 /* Allocate memory if needed */
213 self->rx_buff.head =
214 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
215 &self->rx_buff_dma, GFP_KERNEL);
216 if (self->rx_buff.head == NULL) {
217 err = -ENOMEM;
218 goto err_out1;
219 }
220
221 self->tx_buff.head =
222 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
223 &self->tx_buff_dma, GFP_KERNEL);
224 if (self->tx_buff.head == NULL) {
225 err = -ENOMEM;
226 goto err_out2;
227 }
228
229 self->rx_buff.in_frame = FALSE;
230 self->rx_buff.state = OUTSIDE_FRAME;
231 self->tx_buff.data = self->tx_buff.head;
232 self->rx_buff.data = self->rx_buff.head;
233 self->netdev = dev;
234
235 dev->netdev_ops = &w83977_netdev_ops;
236
237 err = register_netdev(dev);
238 if (err) {
  239 		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
240 goto err_out3;
241 }
242 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
243
244 /* Need to store self somewhere */
245 dev_self[i] = self;
246
247 return 0;
248 err_out3:
249 dma_free_coherent(NULL, self->tx_buff.truesize,
250 self->tx_buff.head, self->tx_buff_dma);
251 err_out2:
252 dma_free_coherent(NULL, self->rx_buff.truesize,
253 self->rx_buff.head, self->rx_buff_dma);
254 err_out1:
255 free_netdev(dev);
256 err_out:
257 release_region(iobase, CHIP_IO_EXTENT);
258 return err;
259 }
260
261 /*
262 * Function w83977af_close (self)
263 *
264 * Close driver instance
265 *
266 */
267 static int w83977af_close(struct w83977af_ir *self)
268 {
269 int iobase;
270
271 IRDA_DEBUG(0, "%s()\n", __func__ );
272
273 iobase = self->io.fir_base;
274
275 #ifdef CONFIG_USE_W977_PNP
276 /* enter PnP configuration mode */
277 w977_efm_enter(efio);
278
279 w977_select_device(W977_DEVICE_IR, efio);
280
281 /* Deactivate device */
282 w977_write_reg(0x30, 0x00, efio);
283
284 w977_efm_exit(efio);
285 #endif /* CONFIG_USE_W977_PNP */
286
287 /* Remove netdevice */
288 unregister_netdev(self->netdev);
289
290 /* Release the PORT that this driver is using */
291 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
292 __func__ , self->io.fir_base);
293 release_region(self->io.fir_base, self->io.fir_ext);
294
295 if (self->tx_buff.head)
296 dma_free_coherent(NULL, self->tx_buff.truesize,
297 self->tx_buff.head, self->tx_buff_dma);
298
299 if (self->rx_buff.head)
300 dma_free_coherent(NULL, self->rx_buff.truesize,
301 self->rx_buff.head, self->rx_buff_dma);
302
303 free_netdev(self->netdev);
304
305 return 0;
306 }
307
308 static int w83977af_probe(int iobase, int irq, int dma)
309 {
310 int version;
311 int i;
312
313 for (i=0; i < 2; i++) {
314 IRDA_DEBUG( 0, "%s()\n", __func__ );
315 #ifdef CONFIG_USE_W977_PNP
316 /* Enter PnP configuration mode */
317 w977_efm_enter(efbase[i]);
318
319 w977_select_device(W977_DEVICE_IR, efbase[i]);
320
321 /* Configure PnP port, IRQ, and DMA channel */
322 w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
323 w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
324
325 w977_write_reg(0x70, irq, efbase[i]);
326 #ifdef CONFIG_ARCH_NETWINDER
327 /* Netwinder uses 1 higher than Linux */
328 w977_write_reg(0x74, dma+1, efbase[i]);
329 #else
330 w977_write_reg(0x74, dma, efbase[i]);
331 #endif /* CONFIG_ARCH_NETWINDER */
332 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
333
334 /* Set append hardware CRC, enable IR bank selection */
335 w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
336
337 /* Activate device */
338 w977_write_reg(0x30, 0x01, efbase[i]);
339
340 w977_efm_exit(efbase[i]);
341 #endif /* CONFIG_USE_W977_PNP */
342 /* Disable Advanced mode */
343 switch_bank(iobase, SET2);
  344 		outb(0x00, iobase+2);
345
346 /* Turn on UART (global) interrupts */
347 switch_bank(iobase, SET0);
348 outb(HCR_EN_IRQ, iobase+HCR);
349
350 /* Switch to advanced mode */
351 switch_bank(iobase, SET2);
352 outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
353
354 /* Set default IR-mode */
355 switch_bank(iobase, SET0);
356 outb(HCR_SIR, iobase+HCR);
357
358 /* Read the Advanced IR ID */
359 switch_bank(iobase, SET3);
360 version = inb(iobase+AUID);
361
  362 		/* The high nibble of the version should be 0x1 */
363 if (0x10 == (version & 0xf0)) {
364 efio = efbase[i];
365
366 /* Set FIFO size to 32 */
367 switch_bank(iobase, SET2);
368 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
369
370 /* Set FIFO threshold to TX17, RX16 */
371 switch_bank(iobase, SET0);
372 outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
373 UFR_EN_FIFO,iobase+UFR);
374
375 /* Receiver frame length */
376 switch_bank(iobase, SET4);
377 outb(2048 & 0xff, iobase+6);
378 outb((2048 >> 8) & 0x1f, iobase+7);
379
380 /*
381 * Init HP HSDL-1100 transceiver.
382 *
  383 			 * Set IRX_MSL since we have 2 receive paths, IRRX
  384 			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
  385 			 * be an input pin used for IRRXH
386 *
387 * IRRX pin 37 connected to receiver
388 * IRTX pin 38 connected to transmitter
389 * FIRRX pin 39 connected to receiver (IRSL0)
390 * CIRRX pin 40 connected to pin 37
391 */
392 switch_bank(iobase, SET7);
393 outb(0x40, iobase+7);
394
395 IRDA_MESSAGE("W83977AF (IR) driver loaded. "
396 "Version: 0x%02x\n", version);
397
398 return 0;
399 } else {
  400 			/* Try next extended function register address */
401 IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
402 }
403 }
404 return -1;
405 }
406
407 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
408 {
409 int ir_mode = HCR_SIR;
410 int iobase;
411 __u8 set;
412
413 iobase = self->io.fir_base;
414
415 /* Update accounting for new speed */
416 self->io.speed = speed;
417
418 /* Save current bank */
419 set = inb(iobase+SSR);
420
421 /* Disable interrupts */
422 switch_bank(iobase, SET0);
423 outb(0, iobase+ICR);
424
425 /* Select Set 2 */
426 switch_bank(iobase, SET2);
427 outb(0x00, iobase+ABHL);
428
429 switch (speed) {
430 case 9600: outb(0x0c, iobase+ABLL); break;
431 case 19200: outb(0x06, iobase+ABLL); break;
432 case 38400: outb(0x03, iobase+ABLL); break;
433 case 57600: outb(0x02, iobase+ABLL); break;
434 case 115200: outb(0x01, iobase+ABLL); break;
435 case 576000:
436 ir_mode = HCR_MIR_576;
437 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
438 break;
439 case 1152000:
440 ir_mode = HCR_MIR_1152;
441 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
442 break;
443 case 4000000:
444 ir_mode = HCR_FIR;
445 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
446 break;
447 default:
448 ir_mode = HCR_FIR;
449 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
450 break;
451 }
452
453 /* Set speed mode */
454 switch_bank(iobase, SET0);
455 outb(ir_mode, iobase+HCR);
456
457 /* set FIFO size to 32 */
458 switch_bank(iobase, SET2);
459 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
460
461 /* set FIFO threshold to TX17, RX16 */
462 switch_bank(iobase, SET0);
463 outb(0x00, iobase+UFR); /* Reset */
464 outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
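	/* 0xa7 presumably encodes the TX17/RX16 thresholds while keeping the FIFO enabled */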
465 outb(0xa7, iobase+UFR);
466
467 netif_wake_queue(self->netdev);
468
469 /* Enable some interrupts so we can receive frames */
470 switch_bank(iobase, SET0);
471 if (speed > PIO_MAX_SPEED) {
472 outb(ICR_EFSFI, iobase+ICR);
473 w83977af_dma_receive(self);
474 } else
475 outb(ICR_ERBRI, iobase+ICR);
476
477 /* Restore SSR */
478 outb(set, iobase+SSR);
479 }
480
481 /*
482 * Function w83977af_hard_xmit (skb, dev)
483 *
484 * Sets up a DMA transfer to send the current frame.
485 *
486 */
487 static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
488 struct net_device *dev)
489 {
490 struct w83977af_ir *self;
491 __s32 speed;
492 int iobase;
493 __u8 set;
494 int mtt;
495
496 self = netdev_priv(dev);
497
498 iobase = self->io.fir_base;
499
500 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
501 (int) skb->len);
502
503 /* Lock transmit buffer */
504 netif_stop_queue(dev);
505
506 /* Check if we need to change the speed */
507 speed = irda_get_next_speed(skb);
508 if ((speed != self->io.speed) && (speed != -1)) {
509 /* Check for empty frame */
510 if (!skb->len) {
511 w83977af_change_speed(self, speed);
512 dev_kfree_skb(skb);
513 return NETDEV_TX_OK;
514 } else
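			/* Remember the new speed; it is applied once the current frame has been sent */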
515 self->new_speed = speed;
516 }
517
518 /* Save current set */
519 set = inb(iobase+SSR);
520
521 /* Decide if we should use PIO or DMA transfer */
522 if (self->io.speed > PIO_MAX_SPEED) {
523 self->tx_buff.data = self->tx_buff.head;
524 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
525 self->tx_buff.len = skb->len;
526
527 mtt = irda_get_mtt(skb);
528 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
529 if (mtt)
530 udelay(mtt);
531
532 /* Enable DMA interrupt */
533 switch_bank(iobase, SET0);
534 outb(ICR_EDMAI, iobase+ICR);
535 w83977af_dma_write(self, iobase);
536 } else {
537 self->tx_buff.data = self->tx_buff.head;
538 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
539 self->tx_buff.truesize);
540
541 /* Add interrupt on tx low level (will fire immediately) */
542 switch_bank(iobase, SET0);
543 outb(ICR_ETXTHI, iobase+ICR);
544 }
545 dev_kfree_skb(skb);
546
547 /* Restore set register */
548 outb(set, iobase+SSR);
549
550 return NETDEV_TX_OK;
551 }
552
553 /*
554 * Function w83977af_dma_write (self, iobase)
555 *
556 * Send frame using DMA
557 *
558 */
559 static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
560 {
561 __u8 set;
562 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
563
564 /* Save current set */
565 set = inb(iobase+SSR);
566
567 /* Disable DMA */
568 switch_bank(iobase, SET0);
569 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
570
571 /* Choose transmit DMA channel */
572 switch_bank(iobase, SET2);
573 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
574 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
575 DMA_MODE_WRITE);
576 self->io.direction = IO_XMIT;
577
578 /* Enable DMA */
579 switch_bank(iobase, SET0);
580 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
581
582 /* Restore set register */
583 outb(set, iobase+SSR);
584 }
585
586 /*
587 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
  588  *
  589  *    Fill the transmit FIFO with data from buf using PIO and return
  590  *    the number of bytes actually written
591 */
592 static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
593 {
594 int actual = 0;
595 __u8 set;
596
597 IRDA_DEBUG(4, "%s()\n", __func__ );
598
599 /* Save current bank */
600 set = inb(iobase+SSR);
601
602 switch_bank(iobase, SET0);
603 if (!(inb_p(iobase+USR) & USR_TSRE)) {
604 IRDA_DEBUG(4,
605 "%s(), warning, FIFO not empty yet!\n", __func__ );
606
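		/* Assume up to the Tx threshold (17 bytes, set via UFR) may still be queued in the FIFO */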
607 fifo_size -= 17;
608 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
609 __func__ , fifo_size);
610 }
611
612 /* Fill FIFO with current frame */
613 while ((fifo_size-- > 0) && (actual < len)) {
614 /* Transmit next byte */
615 outb(buf[actual++], iobase+TBR);
616 }
617
618 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
619 __func__ , fifo_size, actual, len);
620
621 /* Restore bank */
622 outb(set, iobase+SSR);
623
624 return actual;
625 }
626
627 /*
628 * Function w83977af_dma_xmit_complete (self)
629 *
  630  *    The transfer of a frame is finished, so do the necessary things
631 *
632 *
633 */
634 static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
635 {
636 int iobase;
637 __u8 set;
638
639 IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
640
641 IRDA_ASSERT(self != NULL, return;);
642
643 iobase = self->io.fir_base;
644
645 /* Save current set */
646 set = inb(iobase+SSR);
647
648 /* Disable DMA */
649 switch_bank(iobase, SET0);
650 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
651
652 /* Check for underrun! */
653 if (inb(iobase+AUDR) & AUDR_UNDR) {
654 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
655
656 self->netdev->stats.tx_errors++;
657 self->netdev->stats.tx_fifo_errors++;
658
659 /* Clear bit, by writing 1 to it */
660 outb(AUDR_UNDR, iobase+AUDR);
661 } else
662 self->netdev->stats.tx_packets++;
663
664
665 if (self->new_speed) {
666 w83977af_change_speed(self, self->new_speed);
667 self->new_speed = 0;
668 }
669
670 /* Unlock tx_buff and request another frame */
671 /* Tell the network layer, that we want more frames */
672 netif_wake_queue(self->netdev);
673
674 /* Restore set */
675 outb(set, iobase+SSR);
676 }
677
678 /*
679 * Function w83977af_dma_receive (self)
680 *
681 * Get ready for receiving a frame. The device will initiate a DMA
682 * if it starts to receive a frame.
683 *
684 */
685 static int w83977af_dma_receive(struct w83977af_ir *self)
686 {
687 int iobase;
688 __u8 set;
689 #ifdef CONFIG_ARCH_NETWINDER
690 unsigned long flags;
691 __u8 hcr;
692 #endif
693 IRDA_ASSERT(self != NULL, return -1;);
694
695 IRDA_DEBUG(4, "%s\n", __func__ );
696
697 iobase= self->io.fir_base;
698
699 /* Save current set */
700 set = inb(iobase+SSR);
701
702 /* Disable DMA */
703 switch_bank(iobase, SET0);
704 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
705
706 /* Choose DMA Rx, DMA Fairness, and Advanced mode */
707 switch_bank(iobase, SET2);
708 outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
709 iobase+ADCR1);
710
711 self->io.direction = IO_RECV;
712 self->rx_buff.data = self->rx_buff.head;
713
714 #ifdef CONFIG_ARCH_NETWINDER
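	/* On the NetWinder, set up the DMA channel by hand under the lock, presumably so it can be enabled together with HCR_EN_DMA below */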
715 spin_lock_irqsave(&self->lock, flags);
716
717 disable_dma(self->io.dma);
718 clear_dma_ff(self->io.dma);
719 set_dma_mode(self->io.dma, DMA_MODE_READ);
720 set_dma_addr(self->io.dma, self->rx_buff_dma);
721 set_dma_count(self->io.dma, self->rx_buff.truesize);
722 #else
723 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
724 DMA_MODE_READ);
725 #endif
726 /*
727 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
728 * important that we don't reset the Tx FIFO since it might not
729 * be finished transmitting yet
730 */
731 switch_bank(iobase, SET0);
732 outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
733 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
734
735 /* Enable DMA */
736 switch_bank(iobase, SET0);
737 #ifdef CONFIG_ARCH_NETWINDER
738 hcr = inb(iobase+HCR);
739 outb(hcr | HCR_EN_DMA, iobase+HCR);
740 enable_dma(self->io.dma);
741 spin_unlock_irqrestore(&self->lock, flags);
742 #else
743 outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
744 #endif
745 /* Restore set */
746 outb(set, iobase+SSR);
747
748 return 0;
749 }
750
751 /*
752 * Function w83977af_receive_complete (self)
753 *
754 * Finished with receiving a frame
755 *
756 */
757 static int w83977af_dma_receive_complete(struct w83977af_ir *self)
758 {
759 struct sk_buff *skb;
760 struct st_fifo *st_fifo;
761 int len;
762 int iobase;
763 __u8 set;
764 __u8 status;
765
766 IRDA_DEBUG(4, "%s\n", __func__ );
767
768 st_fifo = &self->st_fifo;
769
770 iobase = self->io.fir_base;
771
772 /* Save current set */
773 set = inb(iobase+SSR);
774
775 iobase = self->io.fir_base;
776
777 /* Read status FIFO */
778 switch_bank(iobase, SET5);
779 while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
780 st_fifo->entries[st_fifo->tail].status = status;
781
782 st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
783 st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
784
785 st_fifo->tail++;
786 st_fifo->len++;
787 }
788
789 while (st_fifo->len) {
790 /* Get first entry */
791 status = st_fifo->entries[st_fifo->head].status;
792 len = st_fifo->entries[st_fifo->head].len;
793 st_fifo->head++;
794 st_fifo->len--;
795
796 /* Check for errors */
797 if (status & FS_FO_ERR_MSK) {
798 if (status & FS_FO_LST_FR) {
799 /* Add number of lost frames to stats */
800 self->netdev->stats.rx_errors += len;
801 } else {
802 /* Skip frame */
803 self->netdev->stats.rx_errors++;
804
805 self->rx_buff.data += len;
806
807 if (status & FS_FO_MX_LEX)
808 self->netdev->stats.rx_length_errors++;
809
810 if (status & FS_FO_PHY_ERR)
811 self->netdev->stats.rx_frame_errors++;
812
813 if (status & FS_FO_CRC_ERR)
814 self->netdev->stats.rx_crc_errors++;
815 }
816 /* The errors below can be reported in both cases */
817 if (status & FS_FO_RX_OV)
818 self->netdev->stats.rx_fifo_errors++;
819
820 if (status & FS_FO_FSF_OV)
821 self->netdev->stats.rx_fifo_errors++;
822
823 } else {
824 /* Check if we have transferred all data to memory */
825 switch_bank(iobase, SET0);
826 if (inb(iobase+USR) & USR_RDR) {
827 udelay(80); /* Should be enough!? */
828 }
829
830 skb = dev_alloc_skb(len+1);
831 if (skb == NULL) {
832 printk(KERN_INFO
833 "%s(), memory squeeze, dropping frame.\n", __func__);
834 /* Restore set register */
835 outb(set, iobase+SSR);
836
837 return FALSE;
838 }
839
840 /* Align to 20 bytes */
841 skb_reserve(skb, 1);
842
843 /* Copy frame without CRC */
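			/* SIR/MIR frames end with a 16-bit CRC, FIR (4 Mb/s) frames with a 32-bit CRC */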
844 if (self->io.speed < 4000000) {
845 skb_put(skb, len-2);
846 skb_copy_to_linear_data(skb,
847 self->rx_buff.data,
848 len - 2);
849 } else {
850 skb_put(skb, len-4);
851 skb_copy_to_linear_data(skb,
852 self->rx_buff.data,
853 len - 4);
854 }
855
856 /* Move to next frame */
857 self->rx_buff.data += len;
858 self->netdev->stats.rx_packets++;
859
860 skb->dev = self->netdev;
861 skb_reset_mac_header(skb);
862 skb->protocol = htons(ETH_P_IRDA);
863 netif_rx(skb);
864 }
865 }
866 /* Restore set register */
867 outb(set, iobase+SSR);
868
869 return TRUE;
870 }
871
872 /*
  873  * Function w83977af_pio_receive (self)
874 *
875 * Receive all data in receiver FIFO
876 *
877 */
878 static void w83977af_pio_receive(struct w83977af_ir *self)
879 {
880 __u8 byte = 0x00;
881 int iobase;
882
883 IRDA_DEBUG(4, "%s()\n", __func__ );
884
885 IRDA_ASSERT(self != NULL, return;);
886
887 iobase = self->io.fir_base;
888
889 /* Receive all characters in Rx FIFO */
890 do {
891 byte = inb(iobase+RBR);
892 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
893 byte);
894 } while (inb(iobase+USR) & USR_RDR); /* Data available */
895 }
896
897 /*
  898  * Function w83977af_sir_interrupt (self, isr)
899 *
900 * Handle SIR interrupt
901 *
902 */
903 static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
904 {
905 int actual;
906 __u8 new_icr = 0;
907 __u8 set;
908 int iobase;
909
910 IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
911
912 iobase = self->io.fir_base;
913 /* Transmit FIFO low on data */
914 if (isr & ISR_TXTH_I) {
915 /* Write data left in transmit buffer */
916 actual = w83977af_pio_write(self->io.fir_base,
917 self->tx_buff.data,
918 self->tx_buff.len,
919 self->io.fifo_size);
920
921 self->tx_buff.data += actual;
922 self->tx_buff.len -= actual;
923
924 self->io.direction = IO_XMIT;
925
926 /* Check if finished */
927 if (self->tx_buff.len > 0) {
928 new_icr |= ICR_ETXTHI;
929 } else {
930 set = inb(iobase+SSR);
931 switch_bank(iobase, SET0);
932 outb(AUDR_SFEND, iobase+AUDR);
933 outb(set, iobase+SSR);
934
935 self->netdev->stats.tx_packets++;
936
937 /* Feed me more packets */
938 netif_wake_queue(self->netdev);
939 new_icr |= ICR_ETBREI;
940 }
941 }
942 /* Check if transmission has completed */
943 if (isr & ISR_TXEMP_I) {
  944 		/* Check if we need to change the speed */
945 if (self->new_speed) {
946 IRDA_DEBUG(2,
947 "%s(), Changing speed!\n", __func__ );
948 w83977af_change_speed(self, self->new_speed);
949 self->new_speed = 0;
950 }
951
952 /* Turn around and get ready to receive some data */
953 self->io.direction = IO_RECV;
954 new_icr |= ICR_ERBRI;
955 }
956
957 /* Rx FIFO threshold or timeout */
958 if (isr & ISR_RXTH_I) {
959 w83977af_pio_receive(self);
960
961 /* Keep receiving */
962 new_icr |= ICR_ERBRI;
963 }
964 return new_icr;
965 }
966
967 /*
  968  * Function w83977af_fir_interrupt (self, isr)
969 *
970 * Handle MIR/FIR interrupt
971 *
972 */
973 static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
974 {
975 __u8 new_icr = 0;
976 __u8 set;
977 int iobase;
978
979 iobase = self->io.fir_base;
980 set = inb(iobase+SSR);
981
982 /* End of frame detected in FIFO */
983 if (isr & (ISR_FEND_I|ISR_FSF_I)) {
984 if (w83977af_dma_receive_complete(self)) {
985
986 /* Wait for next status FIFO interrupt */
987 new_icr |= ICR_EFSFI;
988 } else {
989 /* DMA not finished yet */
990
991 /* Set timer value, resolution 1 ms */
992 switch_bank(iobase, SET4);
993 outb(0x01, iobase+TMRL); /* 1 ms */
994 outb(0x00, iobase+TMRH);
995
996 /* Start timer */
997 outb(IR_MSL_EN_TMR, iobase+IR_MSL);
998
999 new_icr |= ICR_ETMRI;
1000 }
1001 }
1002 /* Timer finished */
1003 if (isr & ISR_TMR_I) {
1004 /* Disable timer */
1005 switch_bank(iobase, SET4);
1006 outb(0, iobase+IR_MSL);
1007
1008 /* Clear timer event */
1009 /* switch_bank(iobase, SET0); */
1010 /* outb(ASCR_CTE, iobase+ASCR); */
1011
1012 /* Check if this is a TX timer interrupt */
1013 if (self->io.direction == IO_XMIT) {
1014 w83977af_dma_write(self, iobase);
1015
1016 new_icr |= ICR_EDMAI;
1017 } else {
1018 /* Check if DMA has now finished */
1019 w83977af_dma_receive_complete(self);
1020
1021 new_icr |= ICR_EFSFI;
1022 }
1023 }
1024 /* Finished with DMA */
1025 if (isr & ISR_DMA_I) {
1026 w83977af_dma_xmit_complete(self);
1027
1028 /* Check if there are more frames to be transmitted */
1029 /* if (irda_device_txqueue_empty(self)) { */
1030
1031 /* Prepare for receive
1032 *
1033 * ** Netwinder Tx DMA likes that we do this anyway **
1034 */
1035 w83977af_dma_receive(self);
1036 new_icr = ICR_EFSFI;
1037 /* } */
1038 }
1039
1040 /* Restore set */
1041 outb(set, iobase+SSR);
1042
1043 return new_icr;
1044 }
1045
1046 /*
1047 * Function w83977af_interrupt (irq, dev_id, regs)
1048 *
1049 * An interrupt from the chip has arrived. Time to do some work
1050 *
1051 */
1052 static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1053 {
1054 struct net_device *dev = dev_id;
1055 struct w83977af_ir *self;
1056 __u8 set, icr, isr;
1057 int iobase;
1058
1059 self = netdev_priv(dev);
1060
1061 iobase = self->io.fir_base;
1062
1063 /* Save current bank */
1064 set = inb(iobase+SSR);
1065 switch_bank(iobase, SET0);
1066
1067 icr = inb(iobase+ICR);
1068 isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
1069
1070 outb(0, iobase+ICR); /* Disable interrupts */
1071
1072 if (isr) {
1073 /* Dispatch interrupt handler for the current speed */
1074 if (self->io.speed > PIO_MAX_SPEED )
1075 icr = w83977af_fir_interrupt(self, isr);
1076 else
1077 icr = w83977af_sir_interrupt(self, isr);
1078 }
1079
1080 outb(icr, iobase+ICR); /* Restore (new) interrupts */
1081 outb(set, iobase+SSR); /* Restore bank register */
1082 return IRQ_RETVAL(isr);
1083 }
1084
1085 /*
1086 * Function w83977af_is_receiving (self)
1087 *
 1088  *    Return TRUE if we are currently receiving a frame
1089 *
1090 */
1091 static int w83977af_is_receiving(struct w83977af_ir *self)
1092 {
1093 int status = FALSE;
1094 int iobase;
1095 __u8 set;
1096
1097 IRDA_ASSERT(self != NULL, return FALSE;);
1098
1099 if (self->io.speed > 115200) {
1100 iobase = self->io.fir_base;
1101
1102 /* Check if rx FIFO is not empty */
1103 set = inb(iobase+SSR);
1104 switch_bank(iobase, SET2);
1105 if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
1106 /* We are receiving something */
1107 status = TRUE;
1108 }
1109 outb(set, iobase+SSR);
1110 } else
1111 status = (self->rx_buff.state != OUTSIDE_FRAME);
1112
1113 return status;
1114 }
1115
1116 /*
1117 * Function w83977af_net_open (dev)
1118 *
1119 * Start the device
1120 *
1121 */
1122 static int w83977af_net_open(struct net_device *dev)
1123 {
1124 struct w83977af_ir *self;
1125 int iobase;
1126 char hwname[32];
1127 __u8 set;
1128
1129 IRDA_DEBUG(0, "%s()\n", __func__ );
1130
1131 IRDA_ASSERT(dev != NULL, return -1;);
1132 self = netdev_priv(dev);
1133
1134 IRDA_ASSERT(self != NULL, return 0;);
1135
1136 iobase = self->io.fir_base;
1137
1138 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
1139 (void *) dev)) {
1140 return -EAGAIN;
1141 }
1142 /*
1143 * Always allocate the DMA channel after the IRQ,
1144 * and clean up on failure.
1145 */
1146 if (request_dma(self->io.dma, dev->name)) {
1147 free_irq(self->io.irq, dev);
1148 return -EAGAIN;
1149 }
1150
1151 /* Save current set */
1152 set = inb(iobase+SSR);
1153
1154 /* Enable some interrupts so we can receive frames again */
1155 switch_bank(iobase, SET0);
1156 if (self->io.speed > 115200) {
1157 outb(ICR_EFSFI, iobase+ICR);
1158 w83977af_dma_receive(self);
1159 } else
1160 outb(ICR_ERBRI, iobase+ICR);
1161
1162 /* Restore bank register */
1163 outb(set, iobase+SSR);
1164
1165 /* Ready to play! */
1166 netif_start_queue(dev);
1167
1168 /* Give self a hardware name */
1169 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1170
1171 /*
1172 * Open new IrLAP layer instance, now that everything should be
1173 * initialized properly
1174 */
1175 self->irlap = irlap_open(dev, &self->qos, hwname);
1176
1177 return 0;
1178 }
1179
1180 /*
1181 * Function w83977af_net_close (dev)
1182 *
1183 * Stop the device
1184 *
1185 */
1186 static int w83977af_net_close(struct net_device *dev)
1187 {
1188 struct w83977af_ir *self;
1189 int iobase;
1190 __u8 set;
1191
1192 IRDA_DEBUG(0, "%s()\n", __func__ );
1193
1194 IRDA_ASSERT(dev != NULL, return -1;);
1195
1196 self = netdev_priv(dev);
1197
1198 IRDA_ASSERT(self != NULL, return 0;);
1199
1200 iobase = self->io.fir_base;
1201
1202 /* Stop device */
1203 netif_stop_queue(dev);
1204
1205 /* Stop and remove instance of IrLAP */
1206 if (self->irlap)
1207 irlap_close(self->irlap);
1208 self->irlap = NULL;
1209
1210 disable_dma(self->io.dma);
1211
1212 /* Save current set */
1213 set = inb(iobase+SSR);
1214
1215 /* Disable interrupts */
1216 switch_bank(iobase, SET0);
1217 outb(0, iobase+ICR);
1218
1219 free_irq(self->io.irq, dev);
1220 free_dma(self->io.dma);
1221
1222 /* Restore bank register */
1223 outb(set, iobase+SSR);
1224
1225 return 0;
1226 }
1227
1228 /*
1229 * Function w83977af_net_ioctl (dev, rq, cmd)
1230 *
1231 * Process IOCTL commands for this device
1232 *
1233 */
1234 static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1235 {
1236 struct if_irda_req *irq = (struct if_irda_req *) rq;
1237 struct w83977af_ir *self;
1238 unsigned long flags;
1239 int ret = 0;
1240
1241 IRDA_ASSERT(dev != NULL, return -1;);
1242
1243 self = netdev_priv(dev);
1244
1245 IRDA_ASSERT(self != NULL, return -1;);
1246
1247 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1248
1249 spin_lock_irqsave(&self->lock, flags);
1250
1251 switch (cmd) {
1252 case SIOCSBANDWIDTH: /* Set bandwidth */
1253 if (!capable(CAP_NET_ADMIN)) {
1254 ret = -EPERM;
1255 goto out;
1256 }
1257 w83977af_change_speed(self, irq->ifr_baudrate);
1258 break;
1259 case SIOCSMEDIABUSY: /* Set media busy */
1260 if (!capable(CAP_NET_ADMIN)) {
1261 ret = -EPERM;
1262 goto out;
1263 }
1264 irda_device_set_media_busy(self->netdev, TRUE);
1265 break;
1266 case SIOCGRECEIVING: /* Check if we are receiving right now */
1267 irq->ifr_receiving = w83977af_is_receiving(self);
1268 break;
1269 default:
1270 ret = -EOPNOTSUPP;
1271 }
1272 out:
1273 spin_unlock_irqrestore(&self->lock, flags);
1274 return ret;
1275 }
1276
1277 MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1278 MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1279 MODULE_LICENSE("GPL");
1280
1281
1282 module_param(qos_mtt_bits, int, 0);
 1283 MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1284 module_param_array(io, int, NULL, 0);
1285 MODULE_PARM_DESC(io, "Base I/O addresses");
1286 module_param_array(irq, int, NULL, 0);
1287 MODULE_PARM_DESC(irq, "IRQ lines");
1288
1289 /*
1290 * Function init_module (void)
1291 *
1292 *
1293 *
1294 */
1295 module_init(w83977af_init);
1296
1297 /*
1298 * Function cleanup_module (void)
1299 *
1300 *
1301 *
1302 */
1303 module_exit(w83977af_cleanup);