drivers:net: Convert dma_alloc_coherent(...__GFP_ZERO) to dma_zalloc_coherent
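
The conversion follows the pattern sketched below (a minimal illustration; the
names dev, size and handle are placeholders rather than identifiers from this
driver):

	/* before: request zeroed memory through the gfp flag */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL | __GFP_ZERO);

	/* after: dma_zalloc_coherent() implies the zeroing */
	buf = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);

In this file the two buffer allocations in w83977af_open() are the converted
call sites.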
1 /*********************************************************************
2 *
3 * Filename: w83977af_ir.c
4 * Version: 1.0
5 * Description: FIR driver for the Winbond W83977AF Super I/O chip
6 * Status: Experimental.
7 * Author: Paul VanderSpek
8 * Created at: Wed Nov 4 11:46:16 1998
9 * Modified at: Fri Jan 28 12:10:59 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998-1999 Rebel.com
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
21 * warranty for any of this software. This material is provided "AS-IS"
22 * and at no charge.
23 *
24 * If you find bugs in this file, it's very likely that the same bug
25 * will also be in pc87108.c since the implementations are quite
26 * similar.
27 *
28 * Notice that all functions that need to access the chip in _any_
29 * way must save the BSR register on entry and restore it on exit.
30 * It is _very_ important to follow this policy!
31 *
32 * __u8 bank;
33 *
34 * bank = inb( iobase+BSR);
35 *
36 * do_your_stuff_here();
37 *
38 * outb( bank, iobase+BSR);
39 *
40 ********************************************************************/
41
42 #include <linux/module.h>
43 #include <linux/kernel.h>
44 #include <linux/types.h>
45 #include <linux/skbuff.h>
46 #include <linux/netdevice.h>
47 #include <linux/ioport.h>
48 #include <linux/delay.h>
49 #include <linux/init.h>
50 #include <linux/interrupt.h>
51 #include <linux/rtnetlink.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/gfp.h>
54
55 #include <asm/io.h>
56 #include <asm/dma.h>
57 #include <asm/byteorder.h>
58
59 #include <net/irda/irda.h>
60 #include <net/irda/wrapper.h>
61 #include <net/irda/irda_device.h>
62 #include "w83977af.h"
63 #include "w83977af_ir.h"
64
65 #ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
66 #undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
67 #define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
68 #endif
69 #define CONFIG_USE_W977_PNP /* Currently needed */
70 #define PIO_MAX_SPEED 115200
71
72 static char *driver_name = "w83977af_ir";
73 static int qos_mtt_bits = 0x07; /* 1 ms or more */
74
75 #define CHIP_IO_EXTENT 8
76
77 static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
78 #ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
79 static unsigned int irq[] = { 6, 0, 0, 0 };
80 #else
81 static unsigned int irq[] = { 11, 0, 0, 0 };
82 #endif
83 static unsigned int dma[] = { 1, 0, 0, 0 };
84 static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
85 static unsigned int efio = W977_EFIO_BASE;
86
87 static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
88
89 /* Some prototypes */
90 static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
91 unsigned int dma);
92 static int w83977af_close(struct w83977af_ir *self);
93 static int w83977af_probe(int iobase, int irq, int dma);
94 static int w83977af_dma_receive(struct w83977af_ir *self);
95 static int w83977af_dma_receive_complete(struct w83977af_ir *self);
96 static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
97 struct net_device *dev);
98 static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
99 static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
100 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
101 static int w83977af_is_receiving(struct w83977af_ir *self);
102
103 static int w83977af_net_open(struct net_device *dev);
104 static int w83977af_net_close(struct net_device *dev);
105 static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
106
107 /*
108 * Function w83977af_init ()
109 *
110 * Initialize chip. Just try to find out how many chips we are dealing with
111 * and where they are
112 */
113 static int __init w83977af_init(void)
114 {
115 int i;
116
117 IRDA_DEBUG(0, "%s()\n", __func__ );
118
119 for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
121 return 0;
122 }
123 return -ENODEV;
124 }
125
126 /*
127 * Function w83977af_cleanup ()
128 *
129 * Close all configured chips
130 *
131 */
132 static void __exit w83977af_cleanup(void)
133 {
134 int i;
135
136 IRDA_DEBUG(4, "%s()\n", __func__ );
137
138 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
139 if (dev_self[i])
140 w83977af_close(dev_self[i]);
141 }
142 }
143
144 static const struct net_device_ops w83977_netdev_ops = {
145 .ndo_open = w83977af_net_open,
146 .ndo_stop = w83977af_net_close,
147 .ndo_start_xmit = w83977af_hard_xmit,
148 .ndo_do_ioctl = w83977af_net_ioctl,
149 };
150
151 /*
152 * Function w83977af_open (iobase, irq)
153 *
154 * Open driver instance
155 *
156 */
157 static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
158 unsigned int dma)
159 {
160 struct net_device *dev;
161 struct w83977af_ir *self;
162 int err;
163
164 IRDA_DEBUG(0, "%s()\n", __func__ );
165
166 /* Lock the port that we need */
167 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
168 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
169 __func__ , iobase);
170 return -ENODEV;
171 }
172
173 if (w83977af_probe(iobase, irq, dma) == -1) {
174 err = -1;
175 goto err_out;
176 }
177 /*
178 * Allocate new instance of the driver
179 */
180 dev = alloc_irdadev(sizeof(struct w83977af_ir));
181 if (dev == NULL) {
182 printk( KERN_ERR "IrDA: Can't allocate memory for "
183 "IrDA control block!\n");
184 err = -ENOMEM;
185 goto err_out;
186 }
187
188 self = netdev_priv(dev);
189 spin_lock_init(&self->lock);
190
191
192 /* Initialize IO */
193 self->io.fir_base = iobase;
194 self->io.irq = irq;
195 self->io.fir_ext = CHIP_IO_EXTENT;
196 self->io.dma = dma;
197 self->io.fifo_size = 32;
198
199 /* Initialize QoS for this device */
200 irda_init_max_qos_capabilies(&self->qos);
201
202 /* The only value we must override is the baudrate */
203
204 /* FIXME: The HP HSDL-1100 does not support 1152000! */
205 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
206 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
207
208 /* The HP HSDL-1100 needs 1 ms according to the specs */
209 self->qos.min_turn_time.bits = qos_mtt_bits;
210 irda_qos_bits_to_value(&self->qos);
211
212 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
213 self->rx_buff.truesize = 14384;
214 self->tx_buff.truesize = 4000;
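/* Worked example (illustrative): assuming a 2048 byte max frame and a
 * window size of 7, (2048 + 6) * 7 + 6 = 14384, which matches the
 * rx_buff.truesize value above. */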
215
216 /* Allocate memory if needed */
217 self->rx_buff.head =
218 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
219 &self->rx_buff_dma, GFP_KERNEL);
220 if (self->rx_buff.head == NULL) {
221 err = -ENOMEM;
222 goto err_out1;
223 }
224
225 self->tx_buff.head =
226 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
227 &self->tx_buff_dma, GFP_KERNEL);
228 if (self->tx_buff.head == NULL) {
229 err = -ENOMEM;
230 goto err_out2;
231 }
232
233 self->rx_buff.in_frame = FALSE;
234 self->rx_buff.state = OUTSIDE_FRAME;
235 self->tx_buff.data = self->tx_buff.head;
236 self->rx_buff.data = self->rx_buff.head;
237 self->netdev = dev;
238
239 dev->netdev_ops = &w83977_netdev_ops;
240
241 err = register_netdev(dev);
242 if (err) {
243 IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
244 goto err_out3;
245 }
246 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
247
248 /* Need to store self somewhere */
249 dev_self[i] = self;
250
251 return 0;
252 err_out3:
253 dma_free_coherent(NULL, self->tx_buff.truesize,
254 self->tx_buff.head, self->tx_buff_dma);
255 err_out2:
256 dma_free_coherent(NULL, self->rx_buff.truesize,
257 self->rx_buff.head, self->rx_buff_dma);
258 err_out1:
259 free_netdev(dev);
260 err_out:
261 release_region(iobase, CHIP_IO_EXTENT);
262 return err;
263 }
264
265 /*
266 * Function w83977af_close (self)
267 *
268 * Close driver instance
269 *
270 */
271 static int w83977af_close(struct w83977af_ir *self)
272 {
273 int iobase;
274
275 IRDA_DEBUG(0, "%s()\n", __func__ );
276
277 iobase = self->io.fir_base;
278
279 #ifdef CONFIG_USE_W977_PNP
280 /* enter PnP configuration mode */
281 w977_efm_enter(efio);
282
283 w977_select_device(W977_DEVICE_IR, efio);
284
285 /* Deactivate device */
286 w977_write_reg(0x30, 0x00, efio);
287
288 w977_efm_exit(efio);
289 #endif /* CONFIG_USE_W977_PNP */
290
291 /* Remove netdevice */
292 unregister_netdev(self->netdev);
293
294 /* Release the PORT that this driver is using */
295 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
296 __func__ , self->io.fir_base);
297 release_region(self->io.fir_base, self->io.fir_ext);
298
299 if (self->tx_buff.head)
300 dma_free_coherent(NULL, self->tx_buff.truesize,
301 self->tx_buff.head, self->tx_buff_dma);
302
303 if (self->rx_buff.head)
304 dma_free_coherent(NULL, self->rx_buff.truesize,
305 self->rx_buff.head, self->rx_buff_dma);
306
307 free_netdev(self->netdev);
308
309 return 0;
310 }
311
312 static int w83977af_probe(int iobase, int irq, int dma)
313 {
314 int version;
315 int i;
316
317 for (i=0; i < 2; i++) {
318 IRDA_DEBUG( 0, "%s()\n", __func__ );
319 #ifdef CONFIG_USE_W977_PNP
320 /* Enter PnP configuration mode */
321 w977_efm_enter(efbase[i]);
322
323 w977_select_device(W977_DEVICE_IR, efbase[i]);
324
325 /* Configure PnP port, IRQ, and DMA channel */
326 w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
327 w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
328
329 w977_write_reg(0x70, irq, efbase[i]);
330 #ifdef CONFIG_ARCH_NETWINDER
331 /* Netwinder uses 1 higher than Linux */
332 w977_write_reg(0x74, dma+1, efbase[i]);
333 #else
334 w977_write_reg(0x74, dma, efbase[i]);
335 #endif /*CONFIG_ARCH_NETWINDER */
336 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
337
338 /* Set append hardware CRC, enable IR bank selection */
339 w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
340
341 /* Activate device */
342 w977_write_reg(0x30, 0x01, efbase[i]);
343
344 w977_efm_exit(efbase[i]);
345 #endif /* CONFIG_USE_W977_PNP */
346 /* Disable Advanced mode */
347 switch_bank(iobase, SET2);
348 outb(0x00, iobase+2);
349
350 /* Turn on UART (global) interrupts */
351 switch_bank(iobase, SET0);
352 outb(HCR_EN_IRQ, iobase+HCR);
353
354 /* Switch to advanced mode */
355 switch_bank(iobase, SET2);
356 outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
357
358 /* Set default IR-mode */
359 switch_bank(iobase, SET0);
360 outb(HCR_SIR, iobase+HCR);
361
362 /* Read the Advanced IR ID */
363 switch_bank(iobase, SET3);
364 version = inb(iobase+AUID);
365
366 /* The high nibble of the version should be 0x1 */
367 if (0x10 == (version & 0xf0)) {
368 efio = efbase[i];
369
370 /* Set FIFO size to 32 */
371 switch_bank(iobase, SET2);
372 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
373
374 /* Set FIFO threshold to TX17, RX16 */
375 switch_bank(iobase, SET0);
376 outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
377 UFR_EN_FIFO,iobase+UFR);
378
379 /* Receiver frame length */
380 switch_bank(iobase, SET4);
381 outb(2048 & 0xff, iobase+6);
382 outb((2048 >> 8) & 0x1f, iobase+7);
383
384 /*
385 * Init HP HSDL-1100 transceiver.
386 *
387 * Set IRX_MSL since we have two receive paths, IRRX
388 * and IRRXH. Clear IRSL0D since we want IRSL0 to
389 * be an input pin used for IRRXH.
390 *
391 * IRRX pin 37 connected to receiver
392 * IRTX pin 38 connected to transmitter
393 * FIRRX pin 39 connected to receiver (IRSL0)
394 * CIRRX pin 40 connected to pin 37
395 */
396 switch_bank(iobase, SET7);
397 outb(0x40, iobase+7);
398
399 IRDA_MESSAGE("W83977AF (IR) driver loaded. "
400 "Version: 0x%02x\n", version);
401
402 return 0;
403 } else {
404 /* Try next extended function register address */
405 IRDA_DEBUG(0, "%s(), Wrong chip version\n", __func__);
406 }
407 }
408 return -1;
409 }
410
411 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
412 {
413 int ir_mode = HCR_SIR;
414 int iobase;
415 __u8 set;
416
417 iobase = self->io.fir_base;
418
419 /* Update accounting for new speed */
420 self->io.speed = speed;
421
422 /* Save current bank */
423 set = inb(iobase+SSR);
424
425 /* Disable interrupts */
426 switch_bank(iobase, SET0);
427 outb(0, iobase+ICR);
428
429 /* Select Set 2 */
430 switch_bank(iobase, SET2);
431 outb(0x00, iobase+ABHL);
432
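/* The ABLL values below are simply divisors from a 115200 baud base:
 * 115200/9600 = 12 (0x0c), /19200 = 6, /38400 = 3, /57600 = 2,
 * /115200 = 1 (an observation from this table, not from the datasheet). */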
433 switch (speed) {
434 case 9600: outb(0x0c, iobase+ABLL); break;
435 case 19200: outb(0x06, iobase+ABLL); break;
436 case 38400: outb(0x03, iobase+ABLL); break;
437 case 57600: outb(0x02, iobase+ABLL); break;
438 case 115200: outb(0x01, iobase+ABLL); break;
439 case 576000:
440 ir_mode = HCR_MIR_576;
441 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
442 break;
443 case 1152000:
444 ir_mode = HCR_MIR_1152;
445 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
446 break;
447 case 4000000:
448 ir_mode = HCR_FIR;
449 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
450 break;
451 default:
452 ir_mode = HCR_FIR;
453 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
454 break;
455 }
456
457 /* Set speed mode */
458 switch_bank(iobase, SET0);
459 outb(ir_mode, iobase+HCR);
460
461 /* set FIFO size to 32 */
462 switch_bank(iobase, SET2);
463 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
464
465 /* set FIFO threshold to TX17, RX16 */
466 switch_bank(iobase, SET0);
467 outb(0x00, iobase+UFR); /* Reset */
468 outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
469 outb(0xa7, iobase+UFR);
470
471 netif_wake_queue(self->netdev);
472
473 /* Enable some interrupts so we can receive frames */
474 switch_bank(iobase, SET0);
475 if (speed > PIO_MAX_SPEED) {
476 outb(ICR_EFSFI, iobase+ICR);
477 w83977af_dma_receive(self);
478 } else
479 outb(ICR_ERBRI, iobase+ICR);
480
481 /* Restore SSR */
482 outb(set, iobase+SSR);
483 }
484
485 /*
486 * Function w83977af_hard_xmit (skb, dev)
487 *
488 * Sets up a DMA transfer to send the current frame.
489 *
490 */
491 static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
492 struct net_device *dev)
493 {
494 struct w83977af_ir *self;
495 __s32 speed;
496 int iobase;
497 __u8 set;
498 int mtt;
499
500 self = netdev_priv(dev);
501
502 iobase = self->io.fir_base;
503
504 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
505 (int) skb->len);
506
507 /* Lock transmit buffer */
508 netif_stop_queue(dev);
509
510 /* Check if we need to change the speed */
511 speed = irda_get_next_speed(skb);
512 if ((speed != self->io.speed) && (speed != -1)) {
513 /* Check for empty frame */
514 if (!skb->len) {
515 w83977af_change_speed(self, speed);
516 dev_kfree_skb(skb);
517 return NETDEV_TX_OK;
518 } else
519 self->new_speed = speed;
520 }
521
522 /* Save current set */
523 set = inb(iobase+SSR);
524
525 /* Decide if we should use PIO or DMA transfer */
526 if (self->io.speed > PIO_MAX_SPEED) {
527 self->tx_buff.data = self->tx_buff.head;
528 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
529 self->tx_buff.len = skb->len;
530
531 mtt = irda_get_mtt(skb);
532 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
533 if (mtt)
534 udelay(mtt);
535
536 /* Enable DMA interrupt */
537 switch_bank(iobase, SET0);
538 outb(ICR_EDMAI, iobase+ICR);
539 w83977af_dma_write(self, iobase);
540 } else {
541 self->tx_buff.data = self->tx_buff.head;
542 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
543 self->tx_buff.truesize);
544
545 /* Add interrupt on tx low level (will fire immediately) */
546 switch_bank(iobase, SET0);
547 outb(ICR_ETXTHI, iobase+ICR);
548 }
549 dev_kfree_skb(skb);
550
551 /* Restore set register */
552 outb(set, iobase+SSR);
553
554 return NETDEV_TX_OK;
555 }
556
557 /*
558 * Function w83977af_dma_write (self, iobase)
559 *
560 * Send frame using DMA
561 *
562 */
563 static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
564 {
565 __u8 set;
566 #ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
567 unsigned long flags;
568 __u8 hcr;
569 #endif
570 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
571
572 /* Save current set */
573 set = inb(iobase+SSR);
574
575 /* Disable DMA */
576 switch_bank(iobase, SET0);
577 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
578
579 /* Choose transmit DMA channel */
580 switch_bank(iobase, SET2);
581 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
582 #ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
583 spin_lock_irqsave(&self->lock, flags);
584
585 disable_dma(self->io.dma);
586 clear_dma_ff(self->io.dma);
587 set_dma_mode(self->io.dma, DMA_MODE_READ);
588 set_dma_addr(self->io.dma, self->tx_buff_dma);
589 set_dma_count(self->io.dma, self->tx_buff.len);
590 #else
591 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
592 DMA_MODE_WRITE);
593 #endif
594 self->io.direction = IO_XMIT;
595
596 /* Enable DMA */
597 switch_bank(iobase, SET0);
598 #ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
599 hcr = inb(iobase+HCR);
600 outb(hcr | HCR_EN_DMA, iobase+HCR);
601 enable_dma(self->io.dma);
602 spin_unlock_irqrestore(&self->lock, flags);
603 #else
604 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
605 #endif
606
607 /* Restore set register */
608 outb(set, iobase+SSR);
609 }
610
611 /*
612 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
613 *
614 * Fill the transmit FIFO with frame data using PIO, and return the
615 * number of bytes actually written
616 */
617 static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
618 {
619 int actual = 0;
620 __u8 set;
621
622 IRDA_DEBUG(4, "%s()\n", __func__ );
623
624 /* Save current bank */
625 set = inb(iobase+SSR);
626
627 switch_bank(iobase, SET0);
628 if (!(inb_p(iobase+USR) & USR_TSRE)) {
629 IRDA_DEBUG(4,
630 "%s(), warning, FIFO not empty yet!\n", __func__ );
631
632 fifo_size -= 17;
633 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
634 __func__ , fifo_size);
635 }
636
637 /* Fill FIFO with current frame */
638 while ((fifo_size-- > 0) && (actual < len)) {
639 /* Transmit next byte */
640 outb(buf[actual++], iobase+TBR);
641 }
642
643 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
644 __func__ , fifo_size, actual, len);
645
646 /* Restore bank */
647 outb(set, iobase+SSR);
648
649 return actual;
650 }
651
652 /*
653 * Function w83977af_dma_xmit_complete (self)
654 *
655 * The transfer of a frame is finished. So do the necessary things
656 *
657 *
658 */
659 static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
660 {
661 int iobase;
662 __u8 set;
663
664 IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
665
666 IRDA_ASSERT(self != NULL, return;);
667
668 iobase = self->io.fir_base;
669
670 /* Save current set */
671 set = inb(iobase+SSR);
672
673 /* Disable DMA */
674 switch_bank(iobase, SET0);
675 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
676
677 /* Check for underrun! */
678 if (inb(iobase+AUDR) & AUDR_UNDR) {
679 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
680
681 self->netdev->stats.tx_errors++;
682 self->netdev->stats.tx_fifo_errors++;
683
684 /* Clear bit, by writing 1 to it */
685 outb(AUDR_UNDR, iobase+AUDR);
686 } else
687 self->netdev->stats.tx_packets++;
688
689
690 if (self->new_speed) {
691 w83977af_change_speed(self, self->new_speed);
692 self->new_speed = 0;
693 }
694
695 /* Unlock tx_buff and request another frame */
696 /* Tell the network layer, that we want more frames */
697 netif_wake_queue(self->netdev);
698
699 /* Restore set */
700 outb(set, iobase+SSR);
701 }
702
703 /*
704 * Function w83977af_dma_receive (self)
705 *
706 * Get ready for receiving a frame. The device will initiate a DMA
707 * if it starts to receive a frame.
708 *
709 */
710 static int w83977af_dma_receive(struct w83977af_ir *self)
711 {
712 int iobase;
713 __u8 set;
714 #ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
715 unsigned long flags;
716 __u8 hcr;
717 #endif
718 IRDA_ASSERT(self != NULL, return -1;);
719
720 IRDA_DEBUG(4, "%s\n", __func__ );
721
722 iobase = self->io.fir_base;
723
724 /* Save current set */
725 set = inb(iobase+SSR);
726
727 /* Disable DMA */
728 switch_bank(iobase, SET0);
729 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
730
731 /* Choose DMA Rx, DMA Fairness, and Advanced mode */
732 switch_bank(iobase, SET2);
733 outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
734 iobase+ADCR1);
735
736 self->io.direction = IO_RECV;
737 self->rx_buff.data = self->rx_buff.head;
738
739 #ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
740 spin_lock_irqsave(&self->lock, flags);
741
742 disable_dma(self->io.dma);
743 clear_dma_ff(self->io.dma);
744 set_dma_mode(self->io.dma, DMA_MODE_READ);
745 set_dma_addr(self->io.dma, self->rx_buff_dma);
746 set_dma_count(self->io.dma, self->rx_buff.truesize);
747 #else
748 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
749 DMA_MODE_READ);
750 #endif
751 /*
752 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
753 * important that we don't reset the Tx FIFO since it might not
754 * be finished transmitting yet
755 */
756 switch_bank(iobase, SET0);
757 outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
758 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
759
760 /* Enable DMA */
761 switch_bank(iobase, SET0);
762 #ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
763 hcr = inb(iobase+HCR);
764 outb(hcr | HCR_EN_DMA, iobase+HCR);
765 enable_dma(self->io.dma);
766 spin_unlock_irqrestore(&self->lock, flags);
767 #else
768 outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
769 #endif
770 /* Restore set */
771 outb(set, iobase+SSR);
772
773 return 0;
774 }
775
776 /*
777 * Function w83977af_dma_receive_complete (self)
778 *
779 * Finished with receiving a frame
780 *
781 */
782 static int w83977af_dma_receive_complete(struct w83977af_ir *self)
783 {
784 struct sk_buff *skb;
785 struct st_fifo *st_fifo;
786 int len;
787 int iobase;
788 __u8 set;
789 __u8 status;
790
791 IRDA_DEBUG(4, "%s\n", __func__ );
792
793 st_fifo = &self->st_fifo;
794
795 iobase = self->io.fir_base;
796
797 /* Save current set */
798 set = inb(iobase+SSR);
799
802 /* Read status FIFO */
803 switch_bank(iobase, SET5);
804 while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
805 st_fifo->entries[st_fifo->tail].status = status;
806
807 st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
808 st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
809
810 st_fifo->tail++;
811 st_fifo->len++;
812 }
813
814 while (st_fifo->len) {
815 /* Get first entry */
816 status = st_fifo->entries[st_fifo->head].status;
817 len = st_fifo->entries[st_fifo->head].len;
818 st_fifo->head++;
819 st_fifo->len--;
820
821 /* Check for errors */
822 if (status & FS_FO_ERR_MSK) {
823 if (status & FS_FO_LST_FR) {
824 /* Add number of lost frames to stats */
825 self->netdev->stats.rx_errors += len;
826 } else {
827 /* Skip frame */
828 self->netdev->stats.rx_errors++;
829
830 self->rx_buff.data += len;
831
832 if (status & FS_FO_MX_LEX)
833 self->netdev->stats.rx_length_errors++;
834
835 if (status & FS_FO_PHY_ERR)
836 self->netdev->stats.rx_frame_errors++;
837
838 if (status & FS_FO_CRC_ERR)
839 self->netdev->stats.rx_crc_errors++;
840 }
841 /* The errors below can be reported in both cases */
842 if (status & FS_FO_RX_OV)
843 self->netdev->stats.rx_fifo_errors++;
844
845 if (status & FS_FO_FSF_OV)
846 self->netdev->stats.rx_fifo_errors++;
847
848 } else {
849 /* Check if we have transferred all data to memory */
850 switch_bank(iobase, SET0);
851 if (inb(iobase+USR) & USR_RDR) {
852 udelay(80); /* Should be enough!? */
853 }
854
855 skb = dev_alloc_skb(len+1);
856 if (skb == NULL) {
857 printk(KERN_INFO
858 "%s(), memory squeeze, dropping frame.\n", __func__);
859 /* Restore set register */
860 outb(set, iobase+SSR);
861
862 return FALSE;
863 }
864
865 /* Align to 20 bytes */
866 skb_reserve(skb, 1);
867
868 /* Copy frame without CRC */
869 if (self->io.speed < 4000000) {
870 skb_put(skb, len-2);
871 skb_copy_to_linear_data(skb,
872 self->rx_buff.data,
873 len - 2);
874 } else {
875 skb_put(skb, len-4);
876 skb_copy_to_linear_data(skb,
877 self->rx_buff.data,
878 len - 4);
879 }
880
881 /* Move to next frame */
882 self->rx_buff.data += len;
883 self->netdev->stats.rx_packets++;
884
885 skb->dev = self->netdev;
886 skb_reset_mac_header(skb);
887 skb->protocol = htons(ETH_P_IRDA);
888 netif_rx(skb);
889 }
890 }
891 /* Restore set register */
892 outb(set, iobase+SSR);
893
894 return TRUE;
895 }
896
897 /*
898 * Function w83977af_pio_receive (self)
899 *
900 * Receive all data in receiver FIFO
901 *
902 */
903 static void w83977af_pio_receive(struct w83977af_ir *self)
904 {
905 __u8 byte = 0x00;
906 int iobase;
907
908 IRDA_DEBUG(4, "%s()\n", __func__ );
909
910 IRDA_ASSERT(self != NULL, return;);
911
912 iobase = self->io.fir_base;
913
914 /* Receive all characters in Rx FIFO */
915 do {
916 byte = inb(iobase+RBR);
917 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
918 byte);
919 } while (inb(iobase+USR) & USR_RDR); /* Data available */
920 }
921
922 /*
923 * Function w83977af_sir_interrupt (self, isr)
924 *
925 * Handle SIR interrupt
926 *
927 */
928 static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
929 {
930 int actual;
931 __u8 new_icr = 0;
932 __u8 set;
933 int iobase;
934
935 IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
936
937 iobase = self->io.fir_base;
938 /* Transmit FIFO low on data */
939 if (isr & ISR_TXTH_I) {
940 /* Write data left in transmit buffer */
941 actual = w83977af_pio_write(self->io.fir_base,
942 self->tx_buff.data,
943 self->tx_buff.len,
944 self->io.fifo_size);
945
946 self->tx_buff.data += actual;
947 self->tx_buff.len -= actual;
948
949 self->io.direction = IO_XMIT;
950
951 /* Check if finished */
952 if (self->tx_buff.len > 0) {
953 new_icr |= ICR_ETXTHI;
954 } else {
955 set = inb(iobase+SSR);
956 switch_bank(iobase, SET0);
957 outb(AUDR_SFEND, iobase+AUDR);
958 outb(set, iobase+SSR);
959
960 self->netdev->stats.tx_packets++;
961
962 /* Feed me more packets */
963 netif_wake_queue(self->netdev);
964 new_icr |= ICR_ETBREI;
965 }
966 }
967 /* Check if transmission has completed */
968 if (isr & ISR_TXEMP_I) {
969 /* Check if we need to change the speed? */
970 if (self->new_speed) {
971 IRDA_DEBUG(2,
972 "%s(), Changing speed!\n", __func__ );
973 w83977af_change_speed(self, self->new_speed);
974 self->new_speed = 0;
975 }
976
977 /* Turn around and get ready to receive some data */
978 self->io.direction = IO_RECV;
979 new_icr |= ICR_ERBRI;
980 }
981
982 /* Rx FIFO threshold or timeout */
983 if (isr & ISR_RXTH_I) {
984 w83977af_pio_receive(self);
985
986 /* Keep receiving */
987 new_icr |= ICR_ERBRI;
988 }
989 return new_icr;
990 }
991
992 /*
993 * Function w83977af_fir_interrupt (self, isr)
994 *
995 * Handle MIR/FIR interrupt
996 *
997 */
998 static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
999 {
1000 __u8 new_icr = 0;
1001 __u8 set;
1002 int iobase;
1003
1004 iobase = self->io.fir_base;
1005 set = inb(iobase+SSR);
1006
1007 /* End of frame detected in FIFO */
1008 if (isr & (ISR_FEND_I|ISR_FSF_I)) {
1009 if (w83977af_dma_receive_complete(self)) {
1010
1011 /* Wait for next status FIFO interrupt */
1012 new_icr |= ICR_EFSFI;
1013 } else {
1014 /* DMA not finished yet */
1015
1016 /* Set timer value, resolution 1 ms */
1017 switch_bank(iobase, SET4);
1018 outb(0x01, iobase+TMRL); /* 1 ms */
1019 outb(0x00, iobase+TMRH);
1020
1021 /* Start timer */
1022 outb(IR_MSL_EN_TMR, iobase+IR_MSL);
1023
1024 new_icr |= ICR_ETMRI;
1025 }
1026 }
1027 /* Timer finished */
1028 if (isr & ISR_TMR_I) {
1029 /* Disable timer */
1030 switch_bank(iobase, SET4);
1031 outb(0, iobase+IR_MSL);
1032
1033 /* Clear timer event */
1034 /* switch_bank(iobase, SET0); */
1035 /* outb(ASCR_CTE, iobase+ASCR); */
1036
1037 /* Check if this is a TX timer interrupt */
1038 if (self->io.direction == IO_XMIT) {
1039 w83977af_dma_write(self, iobase);
1040
1041 new_icr |= ICR_EDMAI;
1042 } else {
1043 /* Check if DMA has now finished */
1044 w83977af_dma_receive_complete(self);
1045
1046 new_icr |= ICR_EFSFI;
1047 }
1048 }
1049 /* Finished with DMA */
1050 if (isr & ISR_DMA_I) {
1051 w83977af_dma_xmit_complete(self);
1052
1053 /* Check if there are more frames to be transmitted */
1054 /* if (irda_device_txqueue_empty(self)) { */
1055
1056 /* Prepare for receive
1057 *
1058 * ** Netwinder Tx DMA likes that we do this anyway **
1059 */
1060 w83977af_dma_receive(self);
1061 new_icr = ICR_EFSFI;
1062 /* } */
1063 }
1064
1065 /* Restore set */
1066 outb(set, iobase+SSR);
1067
1068 return new_icr;
1069 }
1070
1071 /*
1072 * Function w83977af_interrupt (irq, dev_id)
1073 *
1074 * An interrupt from the chip has arrived. Time to do some work
1075 *
1076 */
1077 static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1078 {
1079 struct net_device *dev = dev_id;
1080 struct w83977af_ir *self;
1081 __u8 set, icr, isr;
1082 int iobase;
1083
1084 self = netdev_priv(dev);
1085
1086 iobase = self->io.fir_base;
1087
1088 /* Save current bank */
1089 set = inb(iobase+SSR);
1090 switch_bank(iobase, SET0);
1091
1092 icr = inb(iobase+ICR);
1093 isr = inb(iobase+ISR) & icr; /* Keep only the interrupts we enabled */
1094
1095 outb(0, iobase+ICR); /* Disable interrupts */
1096
1097 if (isr) {
1098 /* Dispatch interrupt handler for the current speed */
1099 if (self->io.speed > PIO_MAX_SPEED )
1100 icr = w83977af_fir_interrupt(self, isr);
1101 else
1102 icr = w83977af_sir_interrupt(self, isr);
1103 }
1104
1105 outb(icr, iobase+ICR); /* Restore (new) interrupts */
1106 outb(set, iobase+SSR); /* Restore bank register */
1107 return IRQ_RETVAL(isr);
1108 }
1109
1110 /*
1111 * Function w83977af_is_receiving (self)
1112 *
1113 * Return TRUE if we are currently receiving a frame
1114 *
1115 */
1116 static int w83977af_is_receiving(struct w83977af_ir *self)
1117 {
1118 int status = FALSE;
1119 int iobase;
1120 __u8 set;
1121
1122 IRDA_ASSERT(self != NULL, return FALSE;);
1123
1124 if (self->io.speed > 115200) {
1125 iobase = self->io.fir_base;
1126
1127 /* Check if rx FIFO is not empty */
1128 set = inb(iobase+SSR);
1129 switch_bank(iobase, SET2);
1130 if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
1131 /* We are receiving something */
1132 status = TRUE;
1133 }
1134 outb(set, iobase+SSR);
1135 } else
1136 status = (self->rx_buff.state != OUTSIDE_FRAME);
1137
1138 return status;
1139 }
1140
1141 /*
1142 * Function w83977af_net_open (dev)
1143 *
1144 * Start the device
1145 *
1146 */
1147 static int w83977af_net_open(struct net_device *dev)
1148 {
1149 struct w83977af_ir *self;
1150 int iobase;
1151 char hwname[32];
1152 __u8 set;
1153
1154 IRDA_DEBUG(0, "%s()\n", __func__ );
1155
1156 IRDA_ASSERT(dev != NULL, return -1;);
1157 self = netdev_priv(dev);
1158
1159 IRDA_ASSERT(self != NULL, return 0;);
1160
1161 iobase = self->io.fir_base;
1162
1163 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
1164 (void *) dev)) {
1165 return -EAGAIN;
1166 }
1167 /*
1168 * Always allocate the DMA channel after the IRQ,
1169 * and clean up on failure.
1170 */
1171 if (request_dma(self->io.dma, dev->name)) {
1172 free_irq(self->io.irq, dev);
1173 return -EAGAIN;
1174 }
1175
1176 /* Save current set */
1177 set = inb(iobase+SSR);
1178
1179 /* Enable some interrupts so we can receive frames again */
1180 switch_bank(iobase, SET0);
1181 if (self->io.speed > 115200) {
1182 outb(ICR_EFSFI, iobase+ICR);
1183 w83977af_dma_receive(self);
1184 } else
1185 outb(ICR_ERBRI, iobase+ICR);
1186
1187 /* Restore bank register */
1188 outb(set, iobase+SSR);
1189
1190 /* Ready to play! */
1191 netif_start_queue(dev);
1192
1193 /* Give self a hardware name */
1194 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1195
1196 /*
1197 * Open new IrLAP layer instance, now that everything should be
1198 * initialized properly
1199 */
1200 self->irlap = irlap_open(dev, &self->qos, hwname);
1201
1202 return 0;
1203 }
1204
1205 /*
1206 * Function w83977af_net_close (dev)
1207 *
1208 * Stop the device
1209 *
1210 */
1211 static int w83977af_net_close(struct net_device *dev)
1212 {
1213 struct w83977af_ir *self;
1214 int iobase;
1215 __u8 set;
1216
1217 IRDA_DEBUG(0, "%s()\n", __func__ );
1218
1219 IRDA_ASSERT(dev != NULL, return -1;);
1220
1221 self = netdev_priv(dev);
1222
1223 IRDA_ASSERT(self != NULL, return 0;);
1224
1225 iobase = self->io.fir_base;
1226
1227 /* Stop device */
1228 netif_stop_queue(dev);
1229
1230 /* Stop and remove instance of IrLAP */
1231 if (self->irlap)
1232 irlap_close(self->irlap);
1233 self->irlap = NULL;
1234
1235 disable_dma(self->io.dma);
1236
1237 /* Save current set */
1238 set = inb(iobase+SSR);
1239
1240 /* Disable interrupts */
1241 switch_bank(iobase, SET0);
1242 outb(0, iobase+ICR);
1243
1244 free_irq(self->io.irq, dev);
1245 free_dma(self->io.dma);
1246
1247 /* Restore bank register */
1248 outb(set, iobase+SSR);
1249
1250 return 0;
1251 }
1252
1253 /*
1254 * Function w83977af_net_ioctl (dev, rq, cmd)
1255 *
1256 * Process IOCTL commands for this device
1257 *
1258 */
1259 static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1260 {
1261 struct if_irda_req *irq = (struct if_irda_req *) rq;
1262 struct w83977af_ir *self;
1263 unsigned long flags;
1264 int ret = 0;
1265
1266 IRDA_ASSERT(dev != NULL, return -1;);
1267
1268 self = netdev_priv(dev);
1269
1270 IRDA_ASSERT(self != NULL, return -1;);
1271
1272 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1273
1274 spin_lock_irqsave(&self->lock, flags);
1275
1276 switch (cmd) {
1277 case SIOCSBANDWIDTH: /* Set bandwidth */
1278 if (!capable(CAP_NET_ADMIN)) {
1279 ret = -EPERM;
1280 goto out;
1281 }
1282 w83977af_change_speed(self, irq->ifr_baudrate);
1283 break;
1284 case SIOCSMEDIABUSY: /* Set media busy */
1285 if (!capable(CAP_NET_ADMIN)) {
1286 ret = -EPERM;
1287 goto out;
1288 }
1289 irda_device_set_media_busy(self->netdev, TRUE);
1290 break;
1291 case SIOCGRECEIVING: /* Check if we are receiving right now */
1292 irq->ifr_receiving = w83977af_is_receiving(self);
1293 break;
1294 default:
1295 ret = -EOPNOTSUPP;
1296 }
1297 out:
1298 spin_unlock_irqrestore(&self->lock, flags);
1299 return ret;
1300 }
1301
1302 MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1303 MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1304 MODULE_LICENSE("GPL");
1305
1306
1307 module_param(qos_mtt_bits, int, 0);
1308 MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1309 module_param_array(io, int, NULL, 0);
1310 MODULE_PARM_DESC(io, "Base I/O addresses");
1311 module_param_array(irq, int, NULL, 0);
1312 MODULE_PARM_DESC(irq, "IRQ lines");
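/*
 * Usage sketch (assuming the module is named after this source file, which
 * may differ depending on the build):
 *
 *   modprobe w83977af_ir io=0x180 irq=11 qos_mtt_bits=0x07
 */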
1313
1314 /*
1315 * Function init_module (void)
1316 *
1317 *
1318 *
1319 */
1320 module_init(w83977af_init);
1321
1322 /*
1323 * Function cleanup_module (void)
1324 *
1325 *
1326 *
1327 */
1328 module_exit(w83977af_cleanup);