Merge branch 'pm-sleep'
[deliverable/linux.git] / drivers / net / irda / via-ircc.c
1 /********************************************************************
2 Filename: via-ircc.c
3 Version: 1.0
4 Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
5 Author: VIA Technologies,inc
6 Date : 08/06/2003
7
8 Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10 This program is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free Software
12 Foundation; either version 2, or (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 See the GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, see <http://www.gnu.org/licenses/>.
21
22 F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
23 F02 Oct/28/02: Add SB device ID for 3147 and 3177.
24 Comment :
25 jul/09/2002 : only implement two kind of dongle currently.
26 Oct/02/2002 : work on VT8231 and VT8233 .
27 Aug/06/2003 : change driver format to pci driver .
28
29 2004-02-16: <sda@bdit.de>
30 - Removed unneeded 'legacy' pci stuff.
31 - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
32 - On speed change from core, don't send SIR frame with new speed.
33 Use current speed and change speeds later.
34 - Make module-param dongle_id actually work.
35 - New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
36 Tested with home-grown PCB on EPIA boards.
37 - Code cleanup.
38
39 ********************************************************************/
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/types.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/ioport.h>
46 #include <linux/delay.h>
47 #include <linux/init.h>
48 #include <linux/interrupt.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/pci.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/gfp.h>
53
54 #include <asm/io.h>
55 #include <asm/dma.h>
56 #include <asm/byteorder.h>
57
58 #include <linux/pm.h>
59
60 #include <net/irda/wrapper.h>
61 #include <net/irda/irda.h>
62 #include <net/irda/irda_device.h>
63
64 #include "via-ircc.h"
65
#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40	/* size of the I/O region claimed per device */

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07; /* 1 ms or more */
static int dongle_id = 0;	/* default: probe */

/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);

/* Some prototypes */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
			 unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase);
/* Two xmit handlers: SIR (software async-wrap) and FIR/MIR (DMA fifo);
 * via_ircc_change_speed() installs the appropriate one. */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						 struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
						 struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void via_remove_one(struct pci_dev *pdev);
106
/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
/*
 * Crude bus-access based delay: each inb() from port 0x80 (the POST
 * diagnostic port) costs roughly one ISA bus cycle.  The value read is
 * irrelevant -- only the access time matters -- so it is discarded
 * instead of being stored in a never-read local variable.
 */
static void iodelay(int udelay)
{
	int i;

	for (i = 0; i < udelay; i++)
		inb(0x80);
}
117
118 static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
119 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
120 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
121 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
122 { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
123 { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
124 { 0, }
125 };
126
127 MODULE_DEVICE_TABLE(pci,via_pci_tbl);
128
129
/* PCI driver glue: binds via_init_one()/via_remove_one() to the
 * device IDs listed in via_pci_tbl. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};
136
137
138 /*
139 * Function via_ircc_init ()
140 *
141 * Initialize chip. Just find out chip type and resource.
142 */
143 static int __init via_ircc_init(void)
144 {
145 int rc;
146
147 IRDA_DEBUG(3, "%s()\n", __func__);
148
149 rc = pci_register_driver(&via_driver);
150 if (rc < 0) {
151 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
152 __func__, rc);
153 return -ENODEV;
154 }
155 return 0;
156 }
157
/*
 * PCI probe routine.  Detects whether we sit behind a VT1211 LPC bridge
 * (chipset "3076", resources read via LPC config registers) or not
 * ("3096", resources read from the south bridge's PCI config space),
 * collects IRQ/DMA/IO resources into a chipio_t and hands them to
 * via_ircc_open().  Returns 0 on success or a negative errno.
 */
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	int rc;
	u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
	chipio_t info;

	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);

	rc = pci_enable_device (pcidev);
	if (rc) {
		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
		return -ENODEV;
	}

	// South Bridge exist
	if ( ReadLPCReg(0x20) != 0x3C )
		Chipset=0x3096;
	else
		Chipset=0x3076;

	if (Chipset==0x3076) {
		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);

		WriteLPCReg(7,0x0c );
		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
		if((temp&0x01)==1) {	// BIOS close or no FIR
			WriteLPCReg(0x1d, 0x82 );
			WriteLPCReg(0x23,0x18);
			temp=ReadLPCReg(0xF0);
			if((temp&0x01)==0) {
				/* Two separate DMA channels configured. */
				temp=(ReadLPCReg(0x74)&0x03);	//DMA
				FirDRQ0=temp + 4;
				temp=(ReadLPCReg(0x74)&0x0C) >> 2;
				FirDRQ1=temp + 4;
			} else {
				/* Single shared DMA channel. */
				temp=(ReadLPCReg(0x74)&0x0C) >> 2;	//DMA
				FirDRQ0=temp + 4;
				FirDRQ1=FirDRQ0;
			}
			FirIRQ=(ReadLPCReg(0x70)&0x0f);		//IRQ
			FirIOBase=ReadLPCReg(0x60 ) << 8;	//IO Space :high byte
			FirIOBase=FirIOBase| ReadLPCReg(0x61) ;	//low byte
			FirIOBase=FirIOBase  ;	/* NOTE(review): self-assignment, no effect */
			info.fir_base=FirIOBase;
			info.irq=FirIRQ;
			info.dma=FirDRQ1;
			info.dma2=FirDRQ0;
			pci_read_config_byte(pcidev,0x40,&bTmp);
			pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
			pci_read_config_byte(pcidev,0x42,&bTmp);
			pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
			pci_write_config_byte(pcidev,0x5a,0xc0);
			WriteLPCReg(0x28, 0x70 );
			rc = via_ircc_open(pcidev, &info, 0x3076);
		} else
			rc = -ENODEV; //IR not turn on
	} else { //Not VT1211
		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);

		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
		if((bTmp&0x01)==1) {	// BIOS enable FIR
			//Enable Double DMA clock
			/* NOTE(review): register 0x42 is read into a variable
			 * named oldPCI_40, which is then overwritten by the
			 * read of 0x40 below; the saved values are never
			 * restored anywhere, so this is harmless but odd. */
			pci_read_config_byte(pcidev,0x42,&oldPCI_40);
			pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
			pci_read_config_byte(pcidev,0x40,&oldPCI_40);
			pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
			pci_read_config_byte(pcidev,0x44,&oldPCI_44);
			pci_write_config_byte(pcidev,0x44,0x4e);
			//---------- read configuration from Function0 of south bridge
			if((bTmp&0x02)==0) {
				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
				FirDRQ0 = (bTmp1 & 0x30) >> 4;
				pci_read_config_byte(pcidev,0x44,&bTmp1);
				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
			} else  {
				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
				FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
				FirDRQ1=0;
			}
			pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
			FirIRQ = bTmp1 & 0x0f;

			pci_read_config_byte(pcidev,0x69,&bTmp);
			FirIOBase = bTmp << 8;//hight byte
			pci_read_config_byte(pcidev,0x68,&bTmp);
			FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
			//-------------------------
			info.fir_base=FirIOBase;
			info.irq=FirIRQ;
			info.dma=FirDRQ1;
			info.dma2=FirDRQ0;
			rc = via_ircc_open(pcidev, &info, 0x3096);
		} else
			rc = -ENODEV; //IR not turn on !!!!!
	}//Not VT1211

	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
	return rc;
}
258
259 static void __exit via_ircc_cleanup(void)
260 {
261 IRDA_DEBUG(3, "%s()\n", __func__);
262
263 /* Cleanup all instances of the driver */
264 pci_unregister_driver (&via_driver);
265 }
266
/* net_device callbacks used while the link runs at SIR speeds
 * (<= 115200 baud): frames are async-wrapped in software. */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
/* Same callbacks with the DMA-based FIR/MIR xmit handler;
 * via_ircc_change_speed() swaps between the two ops tables. */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
279
/*
 * Function via_ircc_open(pdev, iobase, irq)
 *
 *    Open driver instance: allocate the IrDA net device, claim the I/O
 *    region, set up QoS from the dongle type, allocate coherent DMA
 *    buffers, register the net device and initialise the hardware.
 *    Returns 0 on success or a negative errno, unwinding via the
 *    err_out labels on failure.
 */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not */
	/* NOTE(review): the probed value is written back into the
	 * module-wide dongle_id, so the first device's id is reused for
	 * any further devices -- confirm this is intended. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only value we must override it the baudrate */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch( self->io.dongle_id ){
	case 0x0d:
		/* This dongle supports MIR/FIR rates as well. */
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *   self->qos.baud_rate.bits = IR_9600;
	 *
	 * Is is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->tx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the hardware..
	 */
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	free_netdev(dev);
	return err;
}
413
414 /*
415 * Function via_remove_one(pdev)
416 *
417 * Close driver instance
418 *
419 */
420 static void via_remove_one(struct pci_dev *pdev)
421 {
422 struct via_ircc_cb *self = pci_get_drvdata(pdev);
423 int iobase;
424
425 IRDA_DEBUG(3, "%s()\n", __func__);
426
427 iobase = self->io.fir_base;
428
429 ResetChip(iobase, 5); //hardware reset.
430 /* Remove netdevice */
431 unregister_netdev(self->netdev);
432
433 /* Release the PORT that this driver is using */
434 IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
435 __func__, self->io.fir_base);
436 release_region(self->io.fir_base, self->io.fir_ext);
437 if (self->tx_buff.head)
438 dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
439 self->tx_buff.head, self->tx_buff_dma);
440 if (self->rx_buff.head)
441 dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
442 self->rx_buff.head, self->rx_buff_dma);
443
444 free_netdev(self->netdev);
445
446 pci_disable_device(pdev);
447 }
448
/*
 * Function via_hw_init(self)
 *
 *    Returns non-negative on success.
 *
 *    Formerly via_ircc_setup
 *
 *    Brings the chip to a known state: interrupts and DMA off, SIR mode
 *    at 9600 baud, then informs the dongle of the initial speed.
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	// FIFO Init
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* hold chip disabled while configuring */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* re-enable the chip */
}
506
/*
 * Function via_ircc_read_dongle_id (void)
 *
 *    Hardware dongle probing is not implemented; log that fact and
 *    fall back to dongle id 9 (IBM31T1100 / Temic TFDS6000-TFDS6500).
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return 9;	/* Default to IBM */
}
518
/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change speed of the attach dongle
 *    only implement two type of dongle currently.
 *
 *    The actual mode (SIR/MIR/FIR) is read back from the chip via
 *    IsSIROn()/IsMIROn()/IsFIROn(); the speed argument is only logged.
 *    Each case performs the dongle-specific mode-select pulse sequence.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	/* speed is unused, as we use IsSIROn()/IsMIROn() */
	speed = speed;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

		/* Note: The dongle_id's listed here are derived from
		 * nsc-ircc.c */

	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	//sir
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {	//mir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {	// fir
				if (IsFIROn(iobase)) {	//fir
					// Mode select On
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);	// invert RX pin

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);
		}
		/* NOTE(review): unlike case 0x08, the MIR check below is not
		 * chained to the SIR branch above, so after the SIR sequence
		 * the FIR else-branch can still run -- confirm against the
		 * dongle datasheet. */
		if (IsMIROn(iobase)) {	//mir
			// Mode select On
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
		} else {	// fir
			if (IsFIROn(iobase)) {	//fir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				// TX On
				WriteTX(iobase, ON);
				udelay(20);
				// Mode select OFF
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				// TX Off
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//sir to rx2
		} else {	// fir mir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//fir to rx
		}
		break;

	case 0x11:		/* Temic TFDS4500 */

		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);	// invert RX pin

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if( IsSIROn(iobase) ){	//sir

			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);

		} else{
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff:		/* Vishay */
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;	//VFIR-16
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}
661
/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 *    Programs the controller mode (SIR/MIR/FIR/VFIR) and baud divisor,
 *    informs the dongle, and swaps the netdev_ops table so the correct
 *    xmit handler is installed for the new speed.
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable the chip while reprogramming. */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode sellection */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* SIR: baud divisor derived from 115200 base clock. */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7] */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	//	EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
760
/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 *    SIR-mode transmit handler: async-wraps the frame into the DMA
 *    buffer and starts a DMA transfer at the current speed.  A pending
 *    speed change is applied immediately for empty frames, otherwise
 *    deferred (self->new_speed) until this frame has gone out.
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						 struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			/* Speed-change request only: apply it now. */
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			/* Data pending: switch after this frame. */
			self->new_speed = speed;
	}
	/* Put the chip back into SIR mode before wrapping the frame. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	/* Async-wrap the IrDA frame into the coherent DMA buffer. */
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
838
839 static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
840 struct net_device *dev)
841 {
842 struct via_ircc_cb *self;
843 u16 iobase;
844 __u32 speed;
845 unsigned long flags;
846
847 self = netdev_priv(dev);
848 iobase = self->io.fir_base;
849
850 if (self->st_fifo.len)
851 return NETDEV_TX_OK;
852 if (self->chip_id == 0x3076)
853 iodelay(1500);
854 else
855 udelay(1500);
856 netif_stop_queue(dev);
857 speed = irda_get_next_speed(skb);
858 if ((speed != self->io.speed) && (speed != -1)) {
859 if (!skb->len) {
860 via_ircc_change_speed(self, speed);
861 dev->trans_start = jiffies;
862 dev_kfree_skb(skb);
863 return NETDEV_TX_OK;
864 } else
865 self->new_speed = speed;
866 }
867 spin_lock_irqsave(&self->lock, flags);
868 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
869 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
870
871 self->tx_fifo.tail += skb->len;
872 dev->stats.tx_bytes += skb->len;
873 skb_copy_from_linear_data(skb,
874 self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
875 self->tx_fifo.len++;
876 self->tx_fifo.free++;
877 //F01 if (self->tx_fifo.len == 1) {
878 via_ircc_dma_xmit(self, iobase);
879 //F01 }
880 //F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
881 dev->trans_start = jiffies;
882 dev_kfree_skb(skb);
883 spin_unlock_irqrestore(&self->lock, flags);
884 return NETDEV_TX_OK;
885
886 }
887
/*
 * Program the controller and the DMA channel to transmit the frame at
 * tx_fifo.ptr.  Called with self->lock held by the xmit path.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* Bus address = offset of this queue entry inside tx_buff plus
	 * the coherent buffer's DMA handle. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
918
/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame in finished. This function will only be called
 *    by the interrupt handler
 *
 *    Checks for TX underrun, applies a deferred speed change, resets the
 *    tx fifo bookkeeping and wakes the queue.  Returns TRUE (the
 *    back-to-back transmit path that could return FALSE is disabled by
 *    the F01 comments).
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//	DisableDmaChannel(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* Underrun: count the error and reset the chip. */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
		/* how to clear underrun? */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
		// Not finished yet!
		via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else {
F01_E*/
	// Reset Tx FIFO info
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore
	// Tell the network layer, that we can accept more frames
	netif_wake_queue(self->netdev);
//F01   }
	return ret;
}
988
/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set configuration for receive a frame.
 *
 *    Resets the tx/rx bookkeeping, points DMA at the full receive
 *    buffer and starts the receiver.  Always returns 0.
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Reset tx window and rx status fifo bookkeeping. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	/* Arm DMA for a full receive buffer. */
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		       self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1031
/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    Controller Finished with receiving frames,
 *    and this routine is call by ISR
 *
 *    SIR path: copy the received frame out of the DMA buffer (the 0x3076
 *    variant duplicates each byte, so every other byte is taken) and
 *    pass it up.  FIR path: record the frame in the status fifo, stop
 *    the receiver, then pop one entry and deliver it.  Returns TRUE
 *    except when allocation fails or a bad length forces a hw reset.
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	iobase = self->io.fir_base;
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	//Speed below FIR
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		// Make sure IP header gets aligned
		skb_reserve(skb, 1);
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			/* 0x3076 delivers each byte twice: keep every other. */
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	}

	else {			//FIR mode
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	//interrupt only, data maybe move by RxT
		/* Sanity-check the payload length (frame minus 4 trailer
		 * bytes must fit the 2-2048 byte window). */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		// It maybe have MAX_RX_WINDOW package receive by
		// receive_complete before Timer IRQ
/* F01_S
	  if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
		  RXStart(iobase,ON);
	  	  SetTimer(iobase,4);
	  }
	  else	  {
F01_E */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
//F01_S
		// Put this entry back in fifo
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * if frame size, data ptr, or skb ptr are wrong, then get next
		 * entry.
		 */
		if ((skb == NULL) || (skb->data == NULL) ||
		    (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			kfree_skb(skb);
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

//F01_E
	}			//FIR
	return TRUE;

}
1156
1157 /*
1158 * if frame is received , but no INT ,then use this routine to upload frame.
1159 */
1160 static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1161 {
1162 struct sk_buff *skb;
1163 int len;
1164 struct st_fifo *st_fifo;
1165 st_fifo = &self->st_fifo;
1166
1167 len = GetRecvByte(iobase, self);
1168
1169 IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
1170
1171 if ((len - 4) < 2) {
1172 self->netdev->stats.rx_dropped++;
1173 return FALSE;
1174 }
1175
1176 skb = dev_alloc_skb(len + 1);
1177 if (skb == NULL) {
1178 self->netdev->stats.rx_dropped++;
1179 return FALSE;
1180 }
1181 skb_reserve(skb, 1);
1182 skb_put(skb, len - 4 + 1);
1183 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
1184 st_fifo->tail++;
1185 st_fifo->len++;
1186 if (st_fifo->tail > MAX_RX_WINDOW)
1187 st_fifo->tail = 0;
1188 // Move to next frame
1189 self->rx_buff.data += len;
1190 self->netdev->stats.rx_bytes += len;
1191 self->netdev->stats.rx_packets++;
1192 skb->dev = self->netdev;
1193 skb_reset_mac_header(skb);
1194 skb->protocol = htons(ETH_P_IRDA);
1195 netif_rx(skb);
1196 if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
1197 RXStart(iobase, ON);
1198 } else {
1199 EnableRX(iobase, OFF);
1200 EnRXDMA(iobase, OFF);
1201 RXStart(iobase, OFF);
1202 }
1203 return TRUE;
1204 }
1205
/*
 * Implement back to back receive , use this routine to upload data.
 *
 * Called from the timer-event branch of the interrupt handler: once the
 * hardware has gone idle, drain the software status FIFO and hand each
 * queued frame to the network layer.  Returns FALSE while reception is
 * still in progress (timer re-armed), TRUE otherwise.
 */

static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		// if still receiving ,then return ,don't upload frame
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/*
	 * Upload when any of these hold: the timer already fired once with
	 * the receiver idle, the DMA buffer cannot hold another 2048-byte
	 * frame, or the status FIFO has reached MAX_RX_WINDOW entries.
	 */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	//upload frame
			// Take the oldest entry from the fifo (head wraps)
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			// Move to next frame
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}		//while
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * if frame is receive complete at this routine ,then upload
		 * frame.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}			// timer detect complete
	else
		SetTimer(iobase, 4);
	return TRUE;

}
1289
1290
1291
/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 * Reads the host status register and dispatches on its Timer (0x40),
 * Tx (0x20) and Rx (0x10) event bits.  Returns IRQ_HANDLED when any
 * event bit was set, IRQ_NONE otherwise (via IRQ_RETVAL).
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	/* NOTE(review): same lock the ioctl path takes with irqsave. */
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		/* End-of-message: transmission finished, try to turn around
		 * to receive if the tx queue is empty. */
		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		/* EOF (0x10): a complete frame is in the buffer. */
		if (iRxIntType & 0x10) {
			if (via_ircc_dma_receive_complete(self, iobase)) {
				//F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			/* FIFO overrun gets a chip reset; other errors
			 * (PHY/CRC) fall back to a full hwreset unless the
			 * status is plain RxData (0x08). */
			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}
1402
/*
 * Reset the chip and re-initialise it to SIR defaults (9600 baud, CRC16
 * on, all DMA/TX/RX engines stopped), then restore the configured speed
 * via via_ircc_change_speed() and clear the software status FIFO.
 * Used to recover from receive errors and stuck frames.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Stop everything before re-initialising the card. */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	/* Bring the chip back up with SIR defaults. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
1435
1436 /*
1437 * Function via_ircc_is_receiving (self)
1438 *
1439 * Return TRUE is we are currently receiving a frame
1440 *
1441 */
1442 static int via_ircc_is_receiving(struct via_ircc_cb *self)
1443 {
1444 int status = FALSE;
1445 int iobase;
1446
1447 IRDA_ASSERT(self != NULL, return FALSE;);
1448
1449 iobase = self->io.fir_base;
1450 if (CkRxRecv(iobase, self))
1451 status = TRUE;
1452
1453 IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1454
1455 return status;
1456 }
1457
1458
1459 /*
1460 * Function via_ircc_net_open (dev)
1461 *
1462 * Start the device
1463 *
1464 */
1465 static int via_ircc_net_open(struct net_device *dev)
1466 {
1467 struct via_ircc_cb *self;
1468 int iobase;
1469 char hwname[32];
1470
1471 IRDA_DEBUG(3, "%s()\n", __func__);
1472
1473 IRDA_ASSERT(dev != NULL, return -1;);
1474 self = netdev_priv(dev);
1475 dev->stats.rx_packets = 0;
1476 IRDA_ASSERT(self != NULL, return 0;);
1477 iobase = self->io.fir_base;
1478 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1479 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1480 self->io.irq);
1481 return -EAGAIN;
1482 }
1483 /*
1484 * Always allocate the DMA channel after the IRQ, and clean up on
1485 * failure.
1486 */
1487 if (request_dma(self->io.dma, dev->name)) {
1488 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1489 self->io.dma);
1490 free_irq(self->io.irq, dev);
1491 return -EAGAIN;
1492 }
1493 if (self->io.dma2 != self->io.dma) {
1494 if (request_dma(self->io.dma2, dev->name)) {
1495 IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1496 driver_name, self->io.dma2);
1497 free_irq(self->io.irq, dev);
1498 free_dma(self->io.dma);
1499 return -EAGAIN;
1500 }
1501 }
1502
1503
1504 /* turn on interrupts */
1505 EnAllInt(iobase, ON);
1506 EnInternalLoop(iobase, OFF);
1507 EnExternalLoop(iobase, OFF);
1508
1509 /* */
1510 via_ircc_dma_receive(self);
1511
1512 /* Ready to play! */
1513 netif_start_queue(dev);
1514
1515 /*
1516 * Open new IrLAP layer instance, now that everything should be
1517 * initialized properly
1518 */
1519 sprintf(hwname, "VIA @ 0x%x", iobase);
1520 self->irlap = irlap_open(dev, &self->qos, hwname);
1521
1522 self->RxLastCount = 0;
1523
1524 return 0;
1525 }
1526
1527 /*
1528 * Function via_ircc_net_close (dev)
1529 *
1530 * Stop the device
1531 *
1532 */
1533 static int via_ircc_net_close(struct net_device *dev)
1534 {
1535 struct via_ircc_cb *self;
1536 int iobase;
1537
1538 IRDA_DEBUG(3, "%s()\n", __func__);
1539
1540 IRDA_ASSERT(dev != NULL, return -1;);
1541 self = netdev_priv(dev);
1542 IRDA_ASSERT(self != NULL, return 0;);
1543
1544 /* Stop device */
1545 netif_stop_queue(dev);
1546 /* Stop and remove instance of IrLAP */
1547 if (self->irlap)
1548 irlap_close(self->irlap);
1549 self->irlap = NULL;
1550 iobase = self->io.fir_base;
1551 EnTXDMA(iobase, OFF);
1552 EnRXDMA(iobase, OFF);
1553 DisableDmaChannel(self->io.dma);
1554
1555 /* Disable interrupts */
1556 EnAllInt(iobase, OFF);
1557 free_irq(self->io.irq, dev);
1558 free_dma(self->io.dma);
1559 if (self->io.dma2 != self->io.dma)
1560 free_dma(self->io.dma2);
1561
1562 return 0;
1563 }
1564
1565 /*
1566 * Function via_ircc_net_ioctl (dev, rq, cmd)
1567 *
1568 * Process IOCTL commands for this device
1569 *
1570 */
1571 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1572 int cmd)
1573 {
1574 struct if_irda_req *irq = (struct if_irda_req *) rq;
1575 struct via_ircc_cb *self;
1576 unsigned long flags;
1577 int ret = 0;
1578
1579 IRDA_ASSERT(dev != NULL, return -1;);
1580 self = netdev_priv(dev);
1581 IRDA_ASSERT(self != NULL, return -1;);
1582 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1583 cmd);
1584 /* Disable interrupts & save flags */
1585 spin_lock_irqsave(&self->lock, flags);
1586 switch (cmd) {
1587 case SIOCSBANDWIDTH: /* Set bandwidth */
1588 if (!capable(CAP_NET_ADMIN)) {
1589 ret = -EPERM;
1590 goto out;
1591 }
1592 via_ircc_change_speed(self, irq->ifr_baudrate);
1593 break;
1594 case SIOCSMEDIABUSY: /* Set media busy */
1595 if (!capable(CAP_NET_ADMIN)) {
1596 ret = -EPERM;
1597 goto out;
1598 }
1599 irda_device_set_media_busy(self->netdev, TRUE);
1600 break;
1601 case SIOCGRECEIVING: /* Check if we are receiving right now */
1602 irq->ifr_receiving = via_ircc_is_receiving(self);
1603 break;
1604 default:
1605 ret = -EOPNOTSUPP;
1606 }
1607 out:
1608 spin_unlock_irqrestore(&self->lock, flags);
1609 return ret;
1610 }
1611
/* Module metadata and init/exit entry points. */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);