/* drivers/net/irda/via-ircc.c */
1 /********************************************************************
2 Filename: via-ircc.c
3 Version: 1.0
4 Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
5 Author: VIA Technologies,inc
6 Date : 08/06/2003
7
8 Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10 This program is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free Software
12 Foundation; either version 2, or (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 See the GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22
23 F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
24 F02 Oct/28/02: Add SB device ID for 3147 and 3177.
25 Comment :
26 jul/09/2002 : only implement two kind of dongle currently.
27 Oct/02/2002 : work on VT8231 and VT8233 .
28 Aug/06/2003 : change driver format to pci driver .
29
30 2004-02-16: <sda@bdit.de>
31 - Removed unneeded 'legacy' pci stuff.
32 - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
33 - On speed change from core, don't send SIR frame with new speed.
34 Use current speed and change speeds later.
35 - Make module-param dongle_id actually work.
36 - New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
37 Tested with home-grown PCB on EPIA boards.
38 - Code cleanup.
39
40 ********************************************************************/
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/types.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/ioport.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/interrupt.h>
50 #include <linux/rtnetlink.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/gfp.h>
54
55 #include <asm/io.h>
56 #include <asm/dma.h>
57 #include <asm/byteorder.h>
58
59 #include <linux/pm.h>
60
61 #include <net/irda/wrapper.h>
62 #include <net/irda/irda.h>
63 #include <net/irda/irda_device.h>
64
65 #include "via-ircc.h"
66
67 #define VIA_MODULE_NAME "via-ircc"
68 #define CHIP_IO_EXTENT 0x40
69
70 static char *driver_name = VIA_MODULE_NAME;
71
72 /* Module parameters */
73 static int qos_mtt_bits = 0x07; /* 1 ms or more */
74 static int dongle_id = 0; /* default: probe */
75
76 /* We can't guess the type of connected dongle, user *must* supply it. */
77 module_param(dongle_id, int, 0);
78
79 /* Some prototypes */
80 static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
81 unsigned int id);
82 static int via_ircc_dma_receive(struct via_ircc_cb *self);
83 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
84 int iobase);
85 static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
86 struct net_device *dev);
87 static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
88 struct net_device *dev);
89 static void via_hw_init(struct via_ircc_cb *self);
90 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
91 static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
92 static int via_ircc_is_receiving(struct via_ircc_cb *self);
93 static int via_ircc_read_dongle_id(int iobase);
94
95 static int via_ircc_net_open(struct net_device *dev);
96 static int via_ircc_net_close(struct net_device *dev);
97 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
98 int cmd);
99 static void via_ircc_change_dongle_speed(int iobase, int speed,
100 int dongle_id);
101 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
102 static void hwreset(struct via_ircc_cb *self);
103 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
104 static int upload_rxdata(struct via_ircc_cb *self, int iobase);
105 static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
106 static void via_remove_one(struct pci_dev *pdev);
107
/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
static void iodelay(int udelay)
{
	int i;

	/*
	 * Busy-wait roughly 'udelay' microseconds by reading I/O port
	 * 0x80 (the POST diagnostic port) once per iteration; the port
	 * access itself provides the delay.  The value read is
	 * irrelevant, so discard it explicitly instead of parking it in
	 * a set-but-never-used local (silences -Wunused-but-set-variable).
	 */
	for (i = 0; i < udelay; i++)
		(void) inb(0x80);
}
118
/*
 * PCI IDs of the supported VIA south-bridge IrDA functions.  The
 * driver_data field (last entry) is just a table index; via_init_one()
 * re-probes the actual chip type via ReadLPCReg(0x20).
 */
static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
	{ 0, }
};
127
MODULE_DEVICE_TABLE(pci,via_pci_tbl);


/* PCI driver glue: binds via_init_one()/via_remove_one() to the IDs above. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};
137
138
139 /*
140 * Function via_ircc_init ()
141 *
142 * Initialize chip. Just find out chip type and resource.
143 */
144 static int __init via_ircc_init(void)
145 {
146 int rc;
147
148 IRDA_DEBUG(3, "%s()\n", __func__);
149
150 rc = pci_register_driver(&via_driver);
151 if (rc < 0) {
152 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
153 __func__, rc);
154 return -ENODEV;
155 }
156 return 0;
157 }
158
159 static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
160 {
161 int rc;
162 u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
163 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
164 chipio_t info;
165
166 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
167
168 rc = pci_enable_device (pcidev);
169 if (rc) {
170 IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
171 return -ENODEV;
172 }
173
174 // South Bridge exist
175 if ( ReadLPCReg(0x20) != 0x3C )
176 Chipset=0x3096;
177 else
178 Chipset=0x3076;
179
180 if (Chipset==0x3076) {
181 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
182
183 WriteLPCReg(7,0x0c );
184 temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
185 if((temp&0x01)==1) { // BIOS close or no FIR
186 WriteLPCReg(0x1d, 0x82 );
187 WriteLPCReg(0x23,0x18);
188 temp=ReadLPCReg(0xF0);
189 if((temp&0x01)==0) {
190 temp=(ReadLPCReg(0x74)&0x03); //DMA
191 FirDRQ0=temp + 4;
192 temp=(ReadLPCReg(0x74)&0x0C) >> 2;
193 FirDRQ1=temp + 4;
194 } else {
195 temp=(ReadLPCReg(0x74)&0x0C) >> 2; //DMA
196 FirDRQ0=temp + 4;
197 FirDRQ1=FirDRQ0;
198 }
199 FirIRQ=(ReadLPCReg(0x70)&0x0f); //IRQ
200 FirIOBase=ReadLPCReg(0x60 ) << 8; //IO Space :high byte
201 FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
202 FirIOBase=FirIOBase ;
203 info.fir_base=FirIOBase;
204 info.irq=FirIRQ;
205 info.dma=FirDRQ1;
206 info.dma2=FirDRQ0;
207 pci_read_config_byte(pcidev,0x40,&bTmp);
208 pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
209 pci_read_config_byte(pcidev,0x42,&bTmp);
210 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
211 pci_write_config_byte(pcidev,0x5a,0xc0);
212 WriteLPCReg(0x28, 0x70 );
213 if (via_ircc_open(pcidev, &info, 0x3076) == 0)
214 rc=0;
215 } else
216 rc = -ENODEV; //IR not turn on
217 } else { //Not VT1211
218 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
219
220 pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
221 if((bTmp&0x01)==1) { // BIOS enable FIR
222 //Enable Double DMA clock
223 pci_read_config_byte(pcidev,0x42,&oldPCI_40);
224 pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
225 pci_read_config_byte(pcidev,0x40,&oldPCI_40);
226 pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
227 pci_read_config_byte(pcidev,0x44,&oldPCI_44);
228 pci_write_config_byte(pcidev,0x44,0x4e);
229 //---------- read configuration from Function0 of south bridge
230 if((bTmp&0x02)==0) {
231 pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
232 FirDRQ0 = (bTmp1 & 0x30) >> 4;
233 pci_read_config_byte(pcidev,0x44,&bTmp1);
234 FirDRQ1 = (bTmp1 & 0xc0) >> 6;
235 } else {
236 pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
237 FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
238 FirDRQ1=0;
239 }
240 pci_read_config_byte(pcidev,0x47,&bTmp1); //IRQ
241 FirIRQ = bTmp1 & 0x0f;
242
243 pci_read_config_byte(pcidev,0x69,&bTmp);
244 FirIOBase = bTmp << 8;//hight byte
245 pci_read_config_byte(pcidev,0x68,&bTmp);
246 FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
247 //-------------------------
248 info.fir_base=FirIOBase;
249 info.irq=FirIRQ;
250 info.dma=FirDRQ1;
251 info.dma2=FirDRQ0;
252 if (via_ircc_open(pcidev, &info, 0x3096) == 0)
253 rc=0;
254 } else
255 rc = -ENODEV; //IR not turn on !!!!!
256 }//Not VT1211
257
258 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
259 return rc;
260 }
261
262 static void __exit via_ircc_cleanup(void)
263 {
264 IRDA_DEBUG(3, "%s()\n", __func__);
265
266 /* Cleanup all instances of the driver */
267 pci_unregister_driver (&via_driver);
268 }
269
/*
 * net_device callbacks used while the link runs at SIR rates; only the
 * transmit handler differs from the FIR set below.  The active set is
 * swapped in via_ircc_change_speed() (FIR ops above 115200 baud).
 */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};

/* Callback set installed for MIR/FIR speeds (> 115200 baud). */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
282
/*
 * Function via_ircc_open(pdev, iobase, irq)
 *
 *    Open driver instance
 *
 *    Allocates the IrDA netdev and driver state, reserves the FIR I/O
 *    region, records the dongle id, sets up QoS and the coherent DMA
 *    RX/TX buffers, registers the netdev and finally initializes the
 *    hardware at 9600 baud SIR.
 *
 *    Returns 0 on success or a negative errno; on failure the
 *    err_out* ladder releases everything acquired so far.
 */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not */
	/* NOTE(review): the probed value is written back into the
	 * module-wide 'dongle_id' parameter, so a second device would
	 * inherit the first device's id -- confirm this is intended. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only value we must override it the baudrate */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch (self->io.dongle_id) {
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		/* Unknown dongles are limited to SIR speeds. */
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *   self->qos.baud_rate.bits = IR_9600;
	 *
	 * Is is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	/* Start with the SIR set; via_ircc_change_speed() swaps later. */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the hardware..
	 */
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return err;
}
419
/*
 * Function via_remove_one(pdev)
 *
 *    Close driver instance
 *
 *    Teardown mirrors via_ircc_open() in reverse: reset the chip,
 *    unregister the netdev (stops traffic), then release the I/O
 *    region, DMA buffers and the netdev itself.  Order matters.
 */
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	//hardware reset.
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	pci_set_drvdata(pdev, NULL);

	/* 'self' lives inside the netdev allocation; free it last. */
	free_netdev(self->netdev);

	pci_disable_device(pdev);
}
455
/*
 * Function via_hw_init(self)
 *
 *    Returns non-negative on success.
 *
 *    Formerly via_ircc_setup
 *
 *    One-time hardware bring-up: configures FIFO interrupts, resets
 *    the core, and leaves the chip in SIR mode at 9600 baud.
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	/* FIFO Init: mask all FIFO interrupts except TX underrun/EOM */
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* 0x00 here, 0x80 at the end -- same disable/re-enable bracket
	 * as in via_ircc_change_speed() ("Enable IR"). */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	/* Start at the lowest common speed; SIR at 9600 baud. */
	self->io.speed = 9600;
	self->st_fifo.len = 0;

	/* Let the dongle follow the controller's mode/speed. */
	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);
}
513
/*
 * Function via_ircc_read_dongle_id (void)
 *
 *    Dongle auto-detection is not implemented for this hardware:
 *    warn the user to pass the dongle_id module parameter and fall
 *    back to id 9 (IBM).
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return 9;	/* Default to IBM */
}
525
/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change speed of the attach dongle
 *    only implement two type of dongle currently.
 *
 *    Each case drives the dongle's mode-select pins (via
 *    SlowIRRXLowActive / WriteTX / WriteGIO pulses with precise
 *    udelay spacing) according to the controller's current mode as
 *    reported by IsSIROn()/IsMIROn()/IsFIROn().
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	/* speed is unused, as we use IsSIROn()/IsMIROn() */
	/* (self-assignment only silences the unused-parameter warning) */
	speed = speed;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

		/* Note: The dongle_id's listed here are derived from
		 * nsc-ircc.c */

	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	//sir
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {	//mir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {	// fir
				if (IsFIROn(iobase)) {	//fir
					// Mode select On
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);	// invert RX pin

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);
		}
		if (IsMIROn(iobase)) {	//mir
			// Mode select On
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
		} else {	// fir
			if (IsFIROn(iobase)) {	//fir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				// TX On
				WriteTX(iobase, ON);
				udelay(20);
				// Mode select OFF
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				// TX Off
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//sir to rx2
		} else {	// fir mir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//fir to rx
		}
		break;

	case 0x11:		/* Temic TFDS4500 */

		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);	// invert RX pin

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	//sir

			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);

		} else {
			/* This dongle only supports SIR. */
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff:		/* Vishay */
		/* Map the controller mode to the Vishay mode code. */
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;	//VFIR-16
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}
668
/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 *    Picks SIR/MIR/FIR/VFIR mode and divider for the requested speed,
 *    reprograms the chip (with IR disabled during the switch), informs
 *    the dongle, and installs the matching netdev_ops set.
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable IR while reprogramming (re-enabled with 0x80 below). */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode sellection */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* SIR divider: 115200 / speed - 1 */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7] */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	//	EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	/* SIR frame filtering only makes sense in SIR mode. */
	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
767
/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 *    SIR transmit path: async-wraps the skb into the coherent TX
 *    buffer and kicks a DMA transfer at the *current* speed; a pending
 *    speed change (new_speed) is applied after TX completes.
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			/* Speed-change-only request: switch now and done. */
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	/* Re-initialize the core into SIR mode before each frame. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	/* Async-wrap (BOF/EOF/escaping) the payload into the DMA buffer. */
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
845
/*
 * FIR transmit path: copies the skb into the next tx_fifo slot inside
 * the coherent TX buffer and starts the DMA transfer.  Speed-change
 * handling mirrors the SIR path above.
 */
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	/* NOTE(review): this early return neither frees nor queues the
	 * skb, nor stops the queue -- looks like an skb leak when the
	 * fifo is busy; verify against the framework's contract. */
	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	/* Settling delay; the 3076 core uses port-0x80 reads instead of
	 * udelay (see iodelay()). */
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			/* Speed-change-only request. */
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	spin_lock_irqsave(&self->lock, flags);
	/* Append this frame to the TX window bookkeeping. */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
	via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;

}
894
/*
 * Program the controller and the ISA-style DMA channel to send the
 * frame currently at tx_fifo.ptr, then start the transmitter.
 * Completion is signalled by interrupt (via_ircc_dma_xmit_complete()).
 * Always returns 0.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* DMA address = bus address of this frame's slot inside the
	 * coherent TX buffer (offset from head + tx_buff_dma). */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
925
/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame in finished. This function will only be called
 *    by the interrupt handler
 *
 *    Checks for TX underrun, applies any deferred speed change, resets
 *    the TX window bookkeeping and wakes the queue.  Returns TRUE.
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//      DisableDmaChannel(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* Underrun: count the error and reset the whole chip. */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
		/* how to clear underrun? */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
		// Not finished yet!
		via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else {
F01_E*/
	// Reset Tx FIFO info
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore
	// Tell the network layer, that we can accept more frames
	netif_wake_queue(self->netdev);
//F01   }
	return ret;
}
995
/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set configuration for receive a frame.
 *
 *    Resets the TX/RX/status bookkeeping, arms the RX DMA channel over
 *    the whole coherent RX buffer and starts the receiver.  Always
 *    returns 0.
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Clear all transfer state before re-arming the receiver. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		       self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1038
/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    Controller Finished with receiving frames,
 *    and this routine is call by ISR
 *
 *    Below-FIR speeds: copy the frame straight out of the DMA buffer
 *    into an skb and push it up.  FIR: record the frame in st_fifo,
 *    stop the receiver, then drain one entry to the stack.
 *    Returns TRUE on normal completion, FALSE on allocation failure
 *    or a bad length (after hwreset()).
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	/* The iobase argument is ignored; fir_base is re-read here. */
	iobase = self->io.fir_base;
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	//Speed below FIR
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		// Make sure IP header gets aligned
		skb_reserve(skb, 1);
		/* len - 2: strip the trailing CRC bytes -- TODO confirm */
		skb_put(skb, len - 2);
		/* The 0x3076 core stores each received byte at every
		 * other position of the DMA buffer; 0x3096 stores them
		 * contiguously. */
		if (self->chip_id == 0x3076) {
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	}

	else {			//FIR mode
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	//interrupt only, data maybe move by RxT
		/* Sanity: payload (len - 4) must fit 2..2048 bytes. */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		/* NOTE(review): tail wraps at MAX_RX_WINDOW but len is
		 * incremented unconditionally, so entries[] slots can be
		 * overwritten if frames outpace draining -- verify. */
		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		// It maybe have MAX_RX_WINDOW package receive by
		// receive_complete before Timer IRQ
/* F01_S
	  if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
		  RXStart(iobase,ON);
	  	  SetTimer(iobase,4);
	  }
	  else	{
F01_E */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
//F01_S
		// Put this entry back in fifo
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * if frame size, data ptr, or skb ptr are wrong, then get next
		 * entry.
		 */
		if ((skb == NULL) || (skb->data == NULL) ||
		    (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			kfree_skb(skb);
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

//F01_E
	}			//FIR
	return TRUE;

}
1163
/*
 * if frame is received , but no INT ,then use this routine to upload frame.
 *
 * Called from the timer path when a frame completed in the DMA buffer
 * without raising an EOF interrupt.  Copies the frame out of rx_buff,
 * pushes it to the network stack and advances the receive bookkeeping.
 * Returns TRUE if a frame was delivered, FALSE if it was dropped.
 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	/* Reject frames too short to hold payload beyond the 4 trailing
	 * bytes (presumably CRC/flag overhead — TODO confirm vs. chip doc). */
	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	/* One byte of headroom so the IP header lands aligned. */
	skb_reserve(skb, 1);
	/* NOTE(review): copies len-4+1 bytes, one more than the FIR path
	 * in dma_receive_complete (len-4) — verify intended. */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	/* Account for this entry in the status FIFO ring. */
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	// Move to next frame
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		/* Room left in the window: keep the receiver running. */
		RXStart(iobase, ON);
	} else {
		/* Window exhausted: stop RX until the FIFO is drained. */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
1212
/*
 * Implement back to back receive , use this routine to upload data.
 *
 * Timer-driven drain of the receive status FIFO: once the chip reports
 * idle (or retry/space thresholds trip), every queued frame is copied
 * out of rx_buff and handed to the network stack; otherwise the timer
 * is re-armed and we try again later.
 */
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		// if still receiving ,then return ,don't upload frame
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/* Drain when the chip has been idle once, when the DMA buffer is
	 * close to full, or when the status FIFO window is exhausted. */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	//upload frame
			// Put this entry back in fifo
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			/* NOTE(review): 'status' is read but never checked
			 * before delivering the frame — confirm intended. */
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			/* One byte headroom to align the IP header. */
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			// Move to next frame
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}		//while
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * if frame is receive complete at this routine ,then upload
		 * frame.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}			// timer detect complete
	else
		/* Not ready yet: poll again shortly. */
		SetTimer(iobase, 4);
	return TRUE;

}
1296
1297
1298
/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 *    Decodes the host status byte and dispatches to the timer, TX and
 *    RX handlers.  Runs under self->lock; dev_id is the net_device
 *    registered in via_ircc_net_open().
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			/* Transmit side: kick the pending DMA transmit. */
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		/* 0x4 = end-of-message: the frame finished transmitting. */
		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			if (via_ircc_dma_xmit_complete(self)) {
				/* TX queue drained: switch back to receive. */
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		if (iRxIntType & 0x10) {
			/* EOF: a full frame is in the DMA buffer. */
			if (via_ircc_dma_receive_complete(self, iobase)) {
				//F01 if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			/* Re-arm receive after the error recovery. */
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}
1409
/*
 * Full hardware reset: stop every TX/RX/DMA engine, re-run the basic
 * chip init back to SIR mode at 9600 baud, then restore the speed that
 * was configured before the reset.  Called from the error paths of the
 * interrupt handler and the receive routines.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Reset the controller and force all engines off. */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	/* Re-initialize to the power-on SIR configuration. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* NOTE(review): 0x00 then 0x80 to I_ST_CT_0 looks like a
	 * disable/reprogram/re-enable bracket — confirm with datasheet. */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	/* Discard any stale status-FIFO entries. */
	self->st_fifo.len = 0;
}
1442
1443 /*
1444 * Function via_ircc_is_receiving (self)
1445 *
1446 * Return TRUE is we are currently receiving a frame
1447 *
1448 */
1449 static int via_ircc_is_receiving(struct via_ircc_cb *self)
1450 {
1451 int status = FALSE;
1452 int iobase;
1453
1454 IRDA_ASSERT(self != NULL, return FALSE;);
1455
1456 iobase = self->io.fir_base;
1457 if (CkRxRecv(iobase, self))
1458 status = TRUE;
1459
1460 IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1461
1462 return status;
1463 }
1464
1465
1466 /*
1467 * Function via_ircc_net_open (dev)
1468 *
1469 * Start the device
1470 *
1471 */
1472 static int via_ircc_net_open(struct net_device *dev)
1473 {
1474 struct via_ircc_cb *self;
1475 int iobase;
1476 char hwname[32];
1477
1478 IRDA_DEBUG(3, "%s()\n", __func__);
1479
1480 IRDA_ASSERT(dev != NULL, return -1;);
1481 self = netdev_priv(dev);
1482 dev->stats.rx_packets = 0;
1483 IRDA_ASSERT(self != NULL, return 0;);
1484 iobase = self->io.fir_base;
1485 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1486 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1487 self->io.irq);
1488 return -EAGAIN;
1489 }
1490 /*
1491 * Always allocate the DMA channel after the IRQ, and clean up on
1492 * failure.
1493 */
1494 if (request_dma(self->io.dma, dev->name)) {
1495 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1496 self->io.dma);
1497 free_irq(self->io.irq, dev);
1498 return -EAGAIN;
1499 }
1500 if (self->io.dma2 != self->io.dma) {
1501 if (request_dma(self->io.dma2, dev->name)) {
1502 IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1503 driver_name, self->io.dma2);
1504 free_irq(self->io.irq, dev);
1505 free_dma(self->io.dma);
1506 return -EAGAIN;
1507 }
1508 }
1509
1510
1511 /* turn on interrupts */
1512 EnAllInt(iobase, ON);
1513 EnInternalLoop(iobase, OFF);
1514 EnExternalLoop(iobase, OFF);
1515
1516 /* */
1517 via_ircc_dma_receive(self);
1518
1519 /* Ready to play! */
1520 netif_start_queue(dev);
1521
1522 /*
1523 * Open new IrLAP layer instance, now that everything should be
1524 * initialized properly
1525 */
1526 sprintf(hwname, "VIA @ 0x%x", iobase);
1527 self->irlap = irlap_open(dev, &self->qos, hwname);
1528
1529 self->RxLastCount = 0;
1530
1531 return 0;
1532 }
1533
1534 /*
1535 * Function via_ircc_net_close (dev)
1536 *
1537 * Stop the device
1538 *
1539 */
1540 static int via_ircc_net_close(struct net_device *dev)
1541 {
1542 struct via_ircc_cb *self;
1543 int iobase;
1544
1545 IRDA_DEBUG(3, "%s()\n", __func__);
1546
1547 IRDA_ASSERT(dev != NULL, return -1;);
1548 self = netdev_priv(dev);
1549 IRDA_ASSERT(self != NULL, return 0;);
1550
1551 /* Stop device */
1552 netif_stop_queue(dev);
1553 /* Stop and remove instance of IrLAP */
1554 if (self->irlap)
1555 irlap_close(self->irlap);
1556 self->irlap = NULL;
1557 iobase = self->io.fir_base;
1558 EnTXDMA(iobase, OFF);
1559 EnRXDMA(iobase, OFF);
1560 DisableDmaChannel(self->io.dma);
1561
1562 /* Disable interrupts */
1563 EnAllInt(iobase, OFF);
1564 free_irq(self->io.irq, dev);
1565 free_dma(self->io.dma);
1566 if (self->io.dma2 != self->io.dma)
1567 free_dma(self->io.dma2);
1568
1569 return 0;
1570 }
1571
1572 /*
1573 * Function via_ircc_net_ioctl (dev, rq, cmd)
1574 *
1575 * Process IOCTL commands for this device
1576 *
1577 */
1578 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1579 int cmd)
1580 {
1581 struct if_irda_req *irq = (struct if_irda_req *) rq;
1582 struct via_ircc_cb *self;
1583 unsigned long flags;
1584 int ret = 0;
1585
1586 IRDA_ASSERT(dev != NULL, return -1;);
1587 self = netdev_priv(dev);
1588 IRDA_ASSERT(self != NULL, return -1;);
1589 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1590 cmd);
1591 /* Disable interrupts & save flags */
1592 spin_lock_irqsave(&self->lock, flags);
1593 switch (cmd) {
1594 case SIOCSBANDWIDTH: /* Set bandwidth */
1595 if (!capable(CAP_NET_ADMIN)) {
1596 ret = -EPERM;
1597 goto out;
1598 }
1599 via_ircc_change_speed(self, irq->ifr_baudrate);
1600 break;
1601 case SIOCSMEDIABUSY: /* Set media busy */
1602 if (!capable(CAP_NET_ADMIN)) {
1603 ret = -EPERM;
1604 goto out;
1605 }
1606 irda_device_set_media_busy(self->netdev, TRUE);
1607 break;
1608 case SIOCGRECEIVING: /* Check if we are receiving right now */
1609 irq->ifr_receiving = via_ircc_is_receiving(self);
1610 break;
1611 default:
1612 ret = -EOPNOTSUPP;
1613 }
1614 out:
1615 spin_unlock_irqrestore(&self->lock, flags);
1616 return ret;
1617 }
1618
/* Module metadata and entry points. */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);