/********************************************************************
 Filename:      via-ircc.c
 Version:       1.0
 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
 Author:        VIA Technologies,inc
 Date  :        08/06/2003

Copyright (c) 1998-2003 VIA Technologies, Inc.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, see <http://www.gnu.org/licenses/>.

F01 Oct/02/02: Modify code for V0.11 (move out back-to-back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
 Comment:
       jul/09/2002 : only two kinds of dongle are currently implemented.
       Oct/02/2002 : works on VT8231 and VT8233.
       Aug/06/2003 : changed driver format to a PCI driver.

       2004-02-16: <sda@bdit.de>
       - Removed unneeded 'legacy' pci stuff.
       - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
       - On speed change from core, don't send SIR frame with new speed.
         Use current speed and change speeds later.
       - Make module-param dongle_id actually work.
       - New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
         Tested with home-grown PCB on EPIA boards.
       - Code cleanup.

 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <linux/pm.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "via-ircc.h"

#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07;         /* 1 ms or more */
static int dongle_id = 0;               /* default: probe */

/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);
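/* e.g. "modprobe via-ircc dongle_id=0x09" selects the IBM31T1100/Temic
 * TFDS6000 handling below; the known IDs are the cases listed in
 * via_ircc_change_dongle_speed(). */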

/* Some prototypes */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
                         unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
                                         int iobase);
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
                                          struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
                                          struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
                              int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
                                         int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void via_remove_one(struct pci_dev *pdev);

/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
static void iodelay(int udelay)
{
        u8 data;
        int i;

        for (i = 0; i < udelay; i++) {
                data = inb(0x80);
        }
}
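/* Reading I/O port 0x80 (the POST-code port) is the classic PC delay
 * idiom: each inb() takes roughly 1 us of bus time, so iodelay(n)
 * busy-waits approximately n microseconds. */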

static const struct pci_device_id via_pci_tbl[] = {
        { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
        { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { 0, }
};
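/* The trailing "0, 0, N" initializers are class, class_mask and
 * driver_data; driver_data is just an index here and does not appear to
 * be consulted by the probe routine. */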

MODULE_DEVICE_TABLE(pci, via_pci_tbl);


static struct pci_driver via_driver = {
        .name           = VIA_MODULE_NAME,
        .id_table       = via_pci_tbl,
        .probe          = via_init_one,
        .remove         = via_remove_one,
};

/*
 * Function via_ircc_init ()
 *
 *    Module init: register the PCI driver. Chip type and resources are
 *    discovered later, in via_init_one().
 */
static int __init via_ircc_init(void)
{
        int rc;

        rc = pci_register_driver(&via_driver);
        if (rc < 0) {
                pr_debug("%s(): error rc = %d, returning  -ENODEV...\n",
                         __func__, rc);
                return -ENODEV;
        }
        return 0;
}

static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
        int rc;
        u8 temp, oldPCI_40, oldPCI_44, bTmp, bTmp1;
        u16 Chipset, FirDRQ1, FirDRQ0, FirIRQ, FirIOBase;
        chipio_t info;

        pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);

        rc = pci_enable_device(pcidev);
        if (rc) {
                pr_debug("%s(): error rc = %d\n", __func__, rc);
                return -ENODEV;
        }

        // Check which south bridge is present
        if (ReadLPCReg(0x20) != 0x3C)
                Chipset = 0x3096;
        else
                Chipset = 0x3076;

        if (Chipset == 0x3076) {
                pr_debug("%s(): Chipset = 3076\n", __func__);

                WriteLPCReg(7, 0x0c);
                temp = ReadLPCReg(0x30);        // check if BIOS enabled FIR
                if ((temp & 0x01) == 1) {       // BIOS left FIR closed; configure it here
                        WriteLPCReg(0x1d, 0x82);
                        WriteLPCReg(0x23, 0x18);
                        temp = ReadLPCReg(0xF0);
                        if ((temp & 0x01) == 0) {
                                temp = (ReadLPCReg(0x74) & 0x03);       // DMA
                                FirDRQ0 = temp + 4;
                                temp = (ReadLPCReg(0x74) & 0x0C) >> 2;
                                FirDRQ1 = temp + 4;
                        } else {
                                temp = (ReadLPCReg(0x74) & 0x0C) >> 2;  // DMA
                                FirDRQ0 = temp + 4;
                                FirDRQ1 = FirDRQ0;
                        }
                        FirIRQ = (ReadLPCReg(0x70) & 0x0f);     // IRQ
                        FirIOBase = ReadLPCReg(0x60) << 8;      // I/O space: high byte
                        FirIOBase = FirIOBase | ReadLPCReg(0x61);       // low byte
                        info.fir_base = FirIOBase;
                        info.irq = FirIRQ;
                        info.dma = FirDRQ1;
                        info.dma2 = FirDRQ0;
                        pci_read_config_byte(pcidev, 0x40, &bTmp);
                        pci_write_config_byte(pcidev, 0x40, ((bTmp | 0x08) & 0xfe));
                        pci_read_config_byte(pcidev, 0x42, &bTmp);
                        pci_write_config_byte(pcidev, 0x42, (bTmp | 0xf0));
                        pci_write_config_byte(pcidev, 0x5a, 0xc0);
                        WriteLPCReg(0x28, 0x70);
                        rc = via_ircc_open(pcidev, &info, 0x3076);
                } else
                        rc = -ENODEV;   // IR not turned on
        } else {        // Not VT1211
                pr_debug("%s(): Chipset = 3096\n", __func__);

                pci_read_config_byte(pcidev, 0x67, &bTmp);      // check if BIOS enabled FIR
                if ((bTmp & 0x01) == 1) {       // BIOS enabled FIR
                        // Enable double DMA clock
                        pci_read_config_byte(pcidev, 0x42, &oldPCI_40);
                        pci_write_config_byte(pcidev, 0x42, oldPCI_40 | 0x80);
                        pci_read_config_byte(pcidev, 0x40, &oldPCI_40);
                        pci_write_config_byte(pcidev, 0x40, oldPCI_40 & 0xf7);
                        pci_read_config_byte(pcidev, 0x44, &oldPCI_44);
                        pci_write_config_byte(pcidev, 0x44, 0x4e);
                        // Read configuration from function 0 of the south bridge
                        if ((bTmp & 0x02) == 0) {
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);     // DMA
                                FirDRQ0 = (bTmp1 & 0x30) >> 4;
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);
                                FirDRQ1 = (bTmp1 & 0xc0) >> 6;
                        } else {
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);     // DMA
                                FirDRQ0 = (bTmp1 & 0x30) >> 4;
                                FirDRQ1 = 0;
                        }
                        pci_read_config_byte(pcidev, 0x47, &bTmp1);     // IRQ
                        FirIRQ = bTmp1 & 0x0f;

                        pci_read_config_byte(pcidev, 0x69, &bTmp);
                        FirIOBase = bTmp << 8;  // high byte
                        pci_read_config_byte(pcidev, 0x68, &bTmp);
                        FirIOBase = (FirIOBase | bTmp) & 0xfff0;

                        info.fir_base = FirIOBase;
                        info.irq = FirIRQ;
                        info.dma = FirDRQ1;
                        info.dma2 = FirDRQ0;
                        rc = via_ircc_open(pcidev, &info, 0x3096);
                } else
                        rc = -ENODEV;   // IR not turned on
        }       // Not VT1211

        pr_debug("%s(): End - rc = %d\n", __func__, rc);
        return rc;
}

static void __exit via_ircc_cleanup(void)
{
        /* Cleanup all instances of the driver */
        pci_unregister_driver(&via_driver);
}

static const struct net_device_ops via_ircc_sir_ops = {
        .ndo_start_xmit = via_ircc_hard_xmit_sir,
        .ndo_open = via_ircc_net_open,
        .ndo_stop = via_ircc_net_close,
        .ndo_do_ioctl = via_ircc_net_ioctl,
};

static const struct net_device_ops via_ircc_fir_ops = {
        .ndo_start_xmit = via_ircc_hard_xmit_fir,
        .ndo_open = via_ircc_net_open,
        .ndo_stop = via_ircc_net_close,
        .ndo_do_ioctl = via_ircc_net_ioctl,
};
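/*
 * The two ops tables differ only in .ndo_start_xmit;
 * via_ircc_change_speed() swaps dev->netdev_ops between them when the
 * link crosses the 115200 bps SIR/FIR boundary.
 */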

/*
 * Function via_ircc_open(pdev, iobase, irq)
 *
 *    Open driver instance
 *
 */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
        struct net_device *dev;
        struct via_ircc_cb *self;
        int err;

        /* Allocate new instance of the driver */
        dev = alloc_irdadev(sizeof(struct via_ircc_cb));
        if (dev == NULL)
                return -ENOMEM;

        self = netdev_priv(dev);
        self->netdev = dev;
        spin_lock_init(&self->lock);

        pci_set_drvdata(pdev, self);

        /* Initialize Resource */
        self->io.cfg_base = info->cfg_base;
        self->io.fir_base = info->fir_base;
        self->io.irq = info->irq;
        self->io.fir_ext = CHIP_IO_EXTENT;
        self->io.dma = info->dma;
        self->io.dma2 = info->dma2;
        self->io.fifo_size = 32;
        self->chip_id = id;
        self->st_fifo.len = 0;
        self->RxDataReady = 0;

        /* Reserve the ioports that we need */
        if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
                pr_debug("%s(), can't get iobase of 0x%03x\n",
                         __func__, self->io.fir_base);
                err = -ENODEV;
                goto err_out1;
        }

        /* Initialize QoS for this device */
        irda_init_max_qos_capabilies(&self->qos);

        /* Check if user has supplied the dongle id or not */
        if (!dongle_id)
                dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
        self->io.dongle_id = dongle_id;

        /* The only value we must override is the baudrate; maximum speed
         * and capabilities are dongle-dependent. */
        switch (self->io.dongle_id) {
        case 0x0d:
                self->qos.baud_rate.bits =
                    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
                    IR_576000 | IR_1152000 | (IR_4000000 << 8);
                break;
        default:
                self->qos.baud_rate.bits =
                    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
                break;
        }
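        /* baud_rate.bits is a 16-bit field: rates above 1152000 live in
         * the high byte, hence the IR_4000000 << 8 above (cf. the bit
         * layout in <net/irda/qos.h>). */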

        /* Following was used for testing:
         *
         *   self->qos.baud_rate.bits = IR_9600;
         *
         * It is no good, as it prohibits (error-prone) speed changes.
         */

        self->qos.min_turn_time.bits = qos_mtt_bits;
        irda_qos_bits_to_value(&self->qos);

        /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
        self->rx_buff.truesize = 14384 + 2048;
        self->tx_buff.truesize = 14384 + 2048;
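        /* 14384 = (2048 + 6) * 7 + 6, i.e. apparently a 7-frame window of
         * maximum-size 2048-byte frames plus per-frame overhead; the
         * extra 2048 is slack. */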

        /* Allocate memory if needed */
        self->rx_buff.head =
                dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
                                    &self->rx_buff_dma, GFP_KERNEL);
        if (self->rx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out2;
        }

        self->tx_buff.head =
                dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
                                    &self->tx_buff_dma, GFP_KERNEL);
        if (self->tx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out3;
        }

        self->rx_buff.in_frame = FALSE;
        self->rx_buff.state = OUTSIDE_FRAME;
        self->tx_buff.data = self->tx_buff.head;
        self->rx_buff.data = self->rx_buff.head;

        /* Reset Tx queue info */
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;

        /* Override the network functions we need to use */
        dev->netdev_ops = &via_ircc_sir_ops;

        err = register_netdev(dev);
        if (err)
                goto err_out4;

        net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
                             dev->name);

        /* Initialise the hardware.. */
        self->io.speed = 9600;
        via_hw_init(self);
        return 0;
 err_out4:
        dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
 err_out3:
        dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
 err_out2:
        release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
        free_netdev(dev);
        return err;
}

/*
 * Function via_remove_one(pdev)
 *
 *    Close driver instance
 *
 */
static void via_remove_one(struct pci_dev *pdev)
{
        struct via_ircc_cb *self = pci_get_drvdata(pdev);
        int iobase;

        iobase = self->io.fir_base;

        ResetChip(iobase, 5);   // hardware reset
        /* Remove netdevice */
        unregister_netdev(self->netdev);

        /* Release the PORT that this driver is using */
        pr_debug("%s(), Releasing Region %03x\n",
                 __func__, self->io.fir_base);
        release_region(self->io.fir_base, self->io.fir_ext);
        if (self->tx_buff.head)
                dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                                  self->tx_buff.head, self->tx_buff_dma);
        if (self->rx_buff.head)
                dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                                  self->rx_buff.head, self->rx_buff_dma);

        free_netdev(self->netdev);

        pci_disable_device(pdev);
}

/*
 * Function via_hw_init(self)
 *
 *    Set the hardware to a known SIR state.
 *
 *    Formerly via_ircc_setup.
 */
static void via_hw_init(struct via_ircc_cb *self)
{
        int iobase = self->io.fir_base;

        SetMaxRxPacketSize(iobase, 0x0fff);     // set to max: 4095
        // FIFO Init
        EnRXFIFOReadyInt(iobase, OFF);
        EnRXFIFOHalfLevelInt(iobase, OFF);
        EnTXFIFOHalfLevelInt(iobase, OFF);
        EnTXFIFOUnderrunEOMInt(iobase, ON);
        EnTXFIFOReadyInt(iobase, OFF);
        InvertTX(iobase, OFF);
        InvertRX(iobase, OFF);

        if (ReadLPCReg(0x20) == 0x3c)
                WriteLPCReg(0xF0, 0);   // for VT1211
        /* Int Init */
        EnRXSpecInt(iobase, ON);

        /* The following is basically hwreset */
        /* If this is the case, why not just call hwreset() ? Jean II */
        ResetChip(iobase, 5);
        EnableDMA(iobase, OFF);
        EnableTX(iobase, OFF);
        EnableRX(iobase, OFF);
        EnRXDMA(iobase, OFF);
        EnTXDMA(iobase, OFF);
        RXStart(iobase, OFF);
        TXStart(iobase, OFF);
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);
        SetBaudRate(iobase, 9600);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);

        self->io.speed = 9600;
        self->st_fifo.len = 0;

        via_ircc_change_dongle_speed(iobase, self->io.speed,
                                     self->io.dongle_id);

        WriteReg(iobase, I_ST_CT_0, 0x80);
}

/*
 * Function via_ircc_read_dongle_id (void)
 *
 */
static int via_ircc_read_dongle_id(int iobase)
{
        net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
        return 9;       /* Default to IBM */
}

/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change the speed of the attached dongle.
 *    Only two types of dongle are currently implemented.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
                                         int dongle_id)
{
        u8 mode = 0;

        /* speed is unused, as we use IsSIROn()/IsMIROn() */
        speed = speed;

        pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
                 __func__, speed, iobase, dongle_id);

        switch (dongle_id) {

                /* Note: The dongle_id's listed here are derived from
                 * nsc-ircc.c */

        case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
                UseOneRX(iobase, ON);   // use one RX pin: RX1, RX2
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);

                EnRX2(iobase, ON);      // SIR to RX2
                EnGPIOtoRX2(iobase, OFF);

                if (IsSIROn(iobase)) {  // SIR
                        // Mode select Off
                        SlowIRRXLowActive(iobase, ON);
                        udelay(1000);
                        SlowIRRXLowActive(iobase, OFF);
                } else {
                        if (IsMIROn(iobase)) {  // MIR
                                // Mode select On
                                SlowIRRXLowActive(iobase, OFF);
                                udelay(20);
                        } else {        // FIR
                                if (IsFIROn(iobase)) {
                                        // Mode select On
                                        SlowIRRXLowActive(iobase, OFF);
                                        udelay(20);
                                }
                        }
                }
                break;

        case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
                UseOneRX(iobase, ON);   // use one RX pin: RX1
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);  // do not invert RX pin

                EnRX2(iobase, ON);
                EnGPIOtoRX2(iobase, OFF);
                if (IsSIROn(iobase)) {  // SIR
                        // Mode select On
                        SlowIRRXLowActive(iobase, ON);
                        udelay(20);
                        // Mode select Off
                        SlowIRRXLowActive(iobase, OFF);
                }
                if (IsMIROn(iobase)) {  // MIR
                        // Mode select On
                        SlowIRRXLowActive(iobase, OFF);
                        udelay(20);
                        // Mode select Off
                        SlowIRRXLowActive(iobase, ON);
                } else {        // FIR
                        if (IsFIROn(iobase)) {
                                // Mode select On
                                SlowIRRXLowActive(iobase, OFF);
                                // TX On
                                WriteTX(iobase, ON);
                                udelay(20);
                                // Mode select Off
                                SlowIRRXLowActive(iobase, ON);
                                udelay(20);
                                // TX Off
                                WriteTX(iobase, OFF);
                        }
                }
                break;

        case 0x0d:
                UseOneRX(iobase, OFF);  // use two RX pins: RX1, RX2
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);
                SlowIRRXLowActive(iobase, OFF);
                if (IsSIROn(iobase)) {  // SIR
                        EnGPIOtoRX2(iobase, OFF);
                        WriteGIO(iobase, OFF);
                        EnRX2(iobase, OFF);     // SIR to RX2
                } else {        // FIR/MIR
                        EnGPIOtoRX2(iobase, OFF);
                        WriteGIO(iobase, OFF);
                        EnRX2(iobase, OFF);     // FIR to RX
                }
                break;

        case 0x11: /* Temic TFDS4500 */

                pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
                         __func__);

                UseOneRX(iobase, ON);   // use one RX pin: RX1
                InvertTX(iobase, OFF);
                InvertRX(iobase, ON);   // invert RX pin

                EnRX2(iobase, ON);      // SIR to RX2
                EnGPIOtoRX2(iobase, OFF);

                if (IsSIROn(iobase)) {  // SIR
                        // Mode select On
                        SlowIRRXLowActive(iobase, ON);
                        udelay(20);
                        // Mode select Off
                        SlowIRRXLowActive(iobase, OFF);
                } else {
                        pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
                                 __func__);
                }
                break;

        case 0x0ff: /* Vishay */
                if (IsSIROn(iobase))
                        mode = 0;
                else if (IsMIROn(iobase))
                        mode = 1;
                else if (IsFIROn(iobase))
                        mode = 2;
                else if (IsVFIROn(iobase))
                        mode = 5;       // VFIR-16
                SI_SetMode(iobase, mode);
                break;

        default:
                net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
                                    __func__, dongle_id);
        }
}

/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
        struct net_device *dev = self->netdev;
        u16 iobase;
        u8 value = 0, bTmp;

        iobase = self->io.fir_base;
        /* Update accounting for new speed */
        self->io.speed = speed;
        pr_debug("%s: change_speed to %d bps.\n", __func__, speed);

        WriteReg(iobase, I_ST_CT_0, 0x0);

        /* Controller mode selection */
        switch (speed) {
        case 2400:
        case 9600:
        case 19200:
        case 38400:
        case 57600:
        case 115200:
                value = (115200/speed)-1;
                SetSIR(iobase, ON);
                CRC16(iobase, ON);
                break;
        case 576000:
                /* FIXME: this can't be right, as it's the same as 115200,
                 * and 576000 is MIR, not SIR. */
                value = 0;
                SetSIR(iobase, ON);
                CRC16(iobase, ON);
                break;
        case 1152000:
                value = 0;
                SetMIR(iobase, ON);
                /* FIXME: CRC ??? */
                break;
        case 4000000:
                value = 0;
                SetFIR(iobase, ON);
                SetPulseWidth(iobase, 0);
                SetSendPreambleCount(iobase, 14);
                CRC16(iobase, OFF);
                EnTXCRC(iobase, ON);
                break;
        case 16000000:
                value = 0;
                SetVFIR(iobase, ON);
                /* FIXME: CRC ??? */
                break;
        default:
                value = 0;
                break;
        }

        /* Set baudrate to 0x19[2..7] */
        bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
        bTmp |= value << 2;
        WriteReg(iobase, I_CF_H_1, bTmp);
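        /* e.g. at 9600 baud: value = 115200/9600 - 1 = 11, placed in bits
         * 2..7 while bits 0..1 of I_CF_H_1 are preserved. */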

        /* Some dongles may need to be informed about speed changes. */
        via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

        /* Set FIFO size to 64 */
        SetFIFO(iobase, 64);

        /* Enable IR */
        WriteReg(iobase, I_ST_CT_0, 0x80);

        // EnTXFIFOHalfLevelInt(iobase,ON);

        /* Enable some interrupts so we can receive frames */
        //EnAllInt(iobase,ON);

        if (IsSIROn(iobase)) {
                SIRFilter(iobase, ON);
                SIRRecvAny(iobase, ON);
        } else {
                SIRFilter(iobase, OFF);
                SIRRecvAny(iobase, OFF);
        }

        if (speed > 115200) {
                /* Install FIR xmit handler */
                dev->netdev_ops = &via_ircc_fir_ops;
                via_ircc_dma_receive(self);
        } else {
                /* Install SIR xmit handler */
                dev->netdev_ops = &via_ircc_sir_ops;
        }
        netif_wake_queue(dev);
}

/*
 * Function via_ircc_hard_xmit_sir (skb, dev)
 *
 *    Transmit the frame!
 *
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
                                          struct net_device *dev)
{
        struct via_ircc_cb *self;
        unsigned long flags;
        u16 iobase;
        __u32 speed;

        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
        iobase = self->io.fir_base;

        netif_stop_queue(dev);
        /* Check if we need to change the speed */
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
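                /* A zero-length skb from the IrDA core carries only a
                 * speed-change request: apply it now and return. */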
                /* Check for empty frame */
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
                        self->new_speed = speed;
        }
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);

        spin_lock_irqsave(&self->lock, flags);
        self->tx_buff.data = self->tx_buff.head;
        self->tx_buff.len =
            async_wrap_skb(skb, self->tx_buff.data,
                           self->tx_buff.truesize);

        dev->stats.tx_bytes += self->tx_buff.len;
        /* Send this frame with old speed */
        SetBaudRate(iobase, self->io.speed);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x80);

        EnableTX(iobase, ON);
        EnableRX(iobase, OFF);

        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);

        EnAllInt(iobase, ON);
        EnTXDMA(iobase, ON);
        EnRXDMA(iobase, OFF);

        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_TX_MODE);

        SetSendByte(iobase, self->tx_buff.len);
        RXStart(iobase, OFF);
        TXStart(iobase, ON);

        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&self->lock, flags);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
                                          struct net_device *dev)
{
        struct via_ircc_cb *self;
        u16 iobase;
        __u32 speed;
        unsigned long flags;

        self = netdev_priv(dev);
        iobase = self->io.fir_base;

        if (self->st_fifo.len)
                return NETDEV_TX_OK;
        if (self->chip_id == 0x3076)
                iodelay(1500);
        else
                udelay(1500);
        netif_stop_queue(dev);
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
                        self->new_speed = speed;
        }
        spin_lock_irqsave(&self->lock, flags);
        self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
        self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

        self->tx_fifo.tail += skb->len;
        dev->stats.tx_bytes += skb->len;
        skb_copy_from_linear_data(skb,
                                  self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
        self->tx_fifo.len++;
        self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
        via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW - 1)) netif_wake_queue(self->netdev);
        dev->trans_start = jiffies;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&self->lock, flags);
        return NETDEV_TX_OK;
}
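/*
 * Each FIR frame is staged in the coherent tx_buff at tx_fifo.tail and
 * described by a tx_fifo.queue[] entry; via_ircc_dma_xmit() then points
 * the legacy DMA engine at that entry. The "F01" lines above are the
 * disabled remains of back-to-back multi-frame transmit (see F01 in the
 * header changelog).
 */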

static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
        EnTXDMA(iobase, OFF);
        self->io.direction = IO_XMIT;
        EnPhys(iobase, ON);
        EnableTX(iobase, ON);
        EnableRX(iobase, OFF);
        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);
        EnAllInt(iobase, ON);
        EnTXDMA(iobase, ON);
        EnRXDMA(iobase, OFF);
        irda_setup_dma(self->io.dma,
                       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
                        self->tx_buff.head) + self->tx_buff_dma,
                       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
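        /* The DMA bus address is tx_buff_dma plus this frame's byte
         * offset within tx_buff; .start is the CPU pointer into the same
         * coherent buffer. */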
        pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
                 __func__, self->tx_fifo.ptr,
                 self->tx_fifo.queue[self->tx_fifo.ptr].len,
                 self->tx_fifo.len);

        SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
        RXStart(iobase, OFF);
        TXStart(iobase, ON);
        return 0;
}

/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be
 *    called by the interrupt handler.
 *
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
        int iobase;
        u8 Tx_status;

        iobase = self->io.fir_base;
        /* Disable DMA */
//      DisableDmaChannel(self->io.dma);
        /* Check for underrun! */
        /* Clear bit, by writing 1 into it */
        Tx_status = GetTXStatus(iobase);
        if (Tx_status & 0x08) {
                self->netdev->stats.tx_errors++;
                self->netdev->stats.tx_fifo_errors++;
                hwreset(self);
                /* how to clear underrun? */
        } else {
                self->netdev->stats.tx_packets++;
                ResetChip(iobase, 3);
                ResetChip(iobase, 4);
        }
        /* Check if we need to change the speed */
        if (self->new_speed) {
                via_ircc_change_speed(self, self->new_speed);
                self->new_speed = 0;
        }

        /* Finished with this frame, so prepare for next */
        if (IsFIROn(iobase)) {
                if (self->tx_fifo.len) {
                        self->tx_fifo.len--;
                        self->tx_fifo.ptr++;
                }
        }
        pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
                 __func__,
                 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
        // Any frames to be sent back-to-back?
        if (self->tx_fifo.len) {
                // Not finished yet!
                via_ircc_dma_xmit(self, iobase);
                ret = FALSE;
        } else {
F01_E */
        // Reset Tx FIFO info
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
//F01   }

        // Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW - 1)) {
        // Not busy transmitting anymore
        // Tell the network layer, that we can accept more frames
        netif_wake_queue(self->netdev);
//F01   }
        return TRUE;
}

/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set up the hardware to receive a frame.
 *
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
        int iobase;

        iobase = self->io.fir_base;

        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
        self->RxDataReady = 0;
        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;
        self->st_fifo.len = self->st_fifo.pending_bytes = 0;
        self->st_fifo.tail = self->st_fifo.head = 0;

        EnPhys(iobase, ON);
        EnableTX(iobase, OFF);
        EnableRX(iobase, ON);

        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);

        EnAllInt(iobase, ON);
        EnTXDMA(iobase, OFF);
        EnRXDMA(iobase, ON);
        irda_setup_dma(self->io.dma2, self->rx_buff_dma,
                       self->rx_buff.truesize, DMA_RX_MODE);
        TXStart(iobase, OFF);
        RXStart(iobase, ON);

        return 0;
}

/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    The controller has finished receiving frames; this routine is
 *    called by the ISR.
 *
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
                                         int iobase)
{
        struct st_fifo *st_fifo;
        struct sk_buff *skb;
        int len, i;
        u8 status = 0;

        iobase = self->io.fir_base;
        st_fifo = &self->st_fifo;

        if (self->io.speed < 4000000) { // speeds below FIR
                len = GetRecvByte(iobase, self);
                skb = dev_alloc_skb(len + 1);
                if (skb == NULL)
                        return FALSE;
                // Make sure IP header gets aligned
                skb_reserve(skb, 1);
                skb_put(skb, len - 2);
                if (self->chip_id == 0x3076) {
                        for (i = 0; i < len - 2; i++)
                                skb->data[i] = self->rx_buff.data[i * 2];
                } else {
                        if (self->chip_id == 0x3096) {
                                for (i = 0; i < len - 2; i++)
                                        skb->data[i] = self->rx_buff.data[i];
                        }
                }
                // Move to next frame
                self->rx_buff.data += len;
                self->netdev->stats.rx_bytes += len;
                self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
                return TRUE;
        }

        else {                  // FIR mode
                len = GetRecvByte(iobase, self);
                if (len == 0)
                        return TRUE;    // interrupt only; data may have been moved by RxT
                if (((len - 4) < 2) || ((len - 4) > 2048)) {
                        pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
                                 __func__, len, RxCurCount(iobase, self),
                                 self->RxLastCount);
                        hwreset(self);
                        return FALSE;
                }
                pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
                         __func__,
                         st_fifo->len, len - 4, RxCurCount(iobase, self));

                st_fifo->entries[st_fifo->tail].status = status;
                st_fifo->entries[st_fifo->tail].len = len;
                st_fifo->pending_bytes += len;
                st_fifo->tail++;
                st_fifo->len++;
                if (st_fifo->tail > MAX_RX_WINDOW)
                        st_fifo->tail = 0;
                self->RxDataReady = 0;

                // Up to MAX_RX_WINDOW frames may have been received by
                // receive_complete before the timer IRQ fires.
/* F01_S
                if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
                        RXStart(iobase, ON);
                        SetTimer(iobase, 4);
                }
                else {
F01_E */
                EnableRX(iobase, OFF);
                EnRXDMA(iobase, OFF);
                RXStart(iobase, OFF);
//F01_S
                // Take this entry back out of the fifo
                if (st_fifo->head > MAX_RX_WINDOW)
                        st_fifo->head = 0;
                status = st_fifo->entries[st_fifo->head].status;
                len = st_fifo->entries[st_fifo->head].len;
                st_fifo->head++;
                st_fifo->len--;

                skb = dev_alloc_skb(len + 1 - 4);
                /*
                 * if frame size, data ptr, or skb ptr are wrong, then get next
                 * entry.
                 */
                if ((skb == NULL) || (skb->data == NULL) ||
                    (self->rx_buff.data == NULL) || (len < 6)) {
                        self->netdev->stats.rx_dropped++;
                        kfree_skb(skb);
                        return TRUE;
                }
                skb_reserve(skb, 1);
                skb_put(skb, len - 4);

                skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
                pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
                         len - 4, self->rx_buff.data);

                // Move to next frame
                self->rx_buff.data += len;
                self->netdev->stats.rx_bytes += len;
                self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);

//F01_E
        }                       // FIR
        return TRUE;
}
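/*
 * Note: the low-speed path above trims 2 trailing bytes per frame and the
 * FIR path trims 4, which presumably correspond to the SIR/MIR CRC-16 and
 * the FIR CRC-32 appended by the hardware.
 */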

/*
 * If a frame was received but no interrupt was raised, this routine
 * uploads the frame.
 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
        struct sk_buff *skb;
        int len;
        struct st_fifo *st_fifo;
        st_fifo = &self->st_fifo;

        len = GetRecvByte(iobase, self);

        pr_debug("%s(): len=%x\n", __func__, len);

        if ((len - 4) < 2) {
                self->netdev->stats.rx_dropped++;
                return FALSE;
        }

        skb = dev_alloc_skb(len + 1);
        if (skb == NULL) {
                self->netdev->stats.rx_dropped++;
                return FALSE;
        }
        skb_reserve(skb, 1);
        skb_put(skb, len - 4 + 1);
        skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
        st_fifo->tail++;
        st_fifo->len++;
        if (st_fifo->tail > MAX_RX_WINDOW)
                st_fifo->tail = 0;
        // Move to next frame
        self->rx_buff.data += len;
        self->netdev->stats.rx_bytes += len;
        self->netdev->stats.rx_packets++;
        skb->dev = self->netdev;
        skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
        if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
                RXStart(iobase, ON);
        } else {
                EnableRX(iobase, OFF);
                EnRXDMA(iobase, OFF);
                RXStart(iobase, OFF);
        }
        return TRUE;
}

/*
 * Implements back-to-back receive; called from the timer interrupt to
 * upload pending frames.
 */
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
        struct st_fifo *st_fifo;
        struct sk_buff *skb;
        int len;
        u8 status;

        st_fifo = &self->st_fifo;

        if (CkRxRecv(iobase, self)) {
                // still receiving: rearm the timer and don't upload yet
                self->RetryCount = 0;
                SetTimer(iobase, 20);
                self->RxDataReady++;
                return FALSE;
        } else
                self->RetryCount++;

        if ((self->RetryCount >= 1) ||
            ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
            (st_fifo->len >= (MAX_RX_WINDOW))) {
                while (st_fifo->len > 0) {      // upload frame
                        // Take this entry back out of the fifo
                        if (st_fifo->head > MAX_RX_WINDOW)
                                st_fifo->head = 0;
                        status = st_fifo->entries[st_fifo->head].status;
                        len = st_fifo->entries[st_fifo->head].len;
                        st_fifo->head++;
                        st_fifo->len--;

                        skb = dev_alloc_skb(len + 1 - 4);
                        /*
                         * if frame size, data ptr, or skb ptr are wrong,
                         * then get next entry.
                         */
                        if ((skb == NULL) || (skb->data == NULL) ||
                            (self->rx_buff.data == NULL) || (len < 6)) {
                                self->netdev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, 1);
                        skb_put(skb, len - 4);
                        skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

                        pr_debug("%s(): len=%x.head=%x\n", __func__,
                                 len - 4, st_fifo->head);

                        // Move to next frame
                        self->rx_buff.data += len;
                        self->netdev->stats.rx_bytes += len;
                        self->netdev->stats.rx_packets++;
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                }               // while
                self->RetryCount = 0;

                pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
                         __func__, GetHostStatus(iobase), GetRXStatus(iobase));

                /*
                 * If a frame finished receiving inside this routine,
                 * upload it as well.
                 */
                if ((GetRXStatus(iobase) & 0x10) &&
                    (RxCurCount(iobase, self) != self->RxLastCount)) {
                        upload_rxdata(self, iobase);
                        if (irda_device_txqueue_empty(self->netdev))
                                via_ircc_dma_receive(self);
                }
        }                       // timer detected completion
        else
                SetTimer(iobase, 4);
        return TRUE;
}


/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct via_ircc_cb *self = netdev_priv(dev);
        int iobase;
        u8 iHostIntType, iRxIntType, iTxIntType;

        iobase = self->io.fir_base;
        spin_lock(&self->lock);
        iHostIntType = GetHostStatus(iobase);

        pr_debug("%s(): iHostIntType %02x: %s %s %s %02x\n",
                 __func__, iHostIntType,
                 (iHostIntType & 0x40) ? "Timer" : "",
                 (iHostIntType & 0x20) ? "Tx" : "",
                 (iHostIntType & 0x10) ? "Rx" : "",
                 (iHostIntType & 0x0e) >> 1);

        if ((iHostIntType & 0x40) != 0) {       // Timer Event
                self->EventFlag.TimeOut++;
                ClearTimerInt(iobase, 1);
                if (self->io.direction == IO_XMIT) {
                        via_ircc_dma_xmit(self, iobase);
                }
                if (self->io.direction == IO_RECV) {
                        /*
                         * frame ready held too long, must reset.
                         */
                        if (self->RxDataReady > 30) {
                                hwreset(self);
                                if (irda_device_txqueue_empty(self->netdev)) {
                                        via_ircc_dma_receive(self);
                                }
                        } else {        // call this to upload the frame
                                RxTimerHandler(self, iobase);
                        }
                }               // RECV
        }                       // Timer Event
        if ((iHostIntType & 0x20) != 0) {       // Tx Event
                iTxIntType = GetTXStatus(iobase);

                pr_debug("%s(): iTxIntType %02x: %s %s %s %s\n",
                         __func__, iTxIntType,
                         (iTxIntType & 0x08) ? "FIFO underr." : "",
                         (iTxIntType & 0x04) ? "EOM" : "",
                         (iTxIntType & 0x02) ? "FIFO ready" : "",
                         (iTxIntType & 0x01) ? "Early EOM" : "");

                if (iTxIntType & 0x4) {
                        self->EventFlag.EOMessage++;    // read and will auto clean
                        if (via_ircc_dma_xmit_complete(self)) {
                                if (irda_device_txqueue_empty(self->netdev)) {
                                        via_ircc_dma_receive(self);
                                }
                        } else {
                                self->EventFlag.Unknown++;
                        }
                }               // EOP
        }                       // Tx Event
        //----------------------------------------
        if ((iHostIntType & 0x10) != 0) {       // Rx Event
                /* Check if DMA has finished */
                iRxIntType = GetRXStatus(iobase);

                pr_debug("%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
                         __func__, iRxIntType,
                         (iRxIntType & 0x80) ? "PHY err." : "",
                         (iRxIntType & 0x40) ? "CRC err" : "",
                         (iRxIntType & 0x20) ? "FIFO overr." : "",
                         (iRxIntType & 0x10) ? "EOF" : "",
                         (iRxIntType & 0x08) ? "RxData" : "",
                         (iRxIntType & 0x02) ? "RxMaxLen" : "",
                         (iRxIntType & 0x01) ? "SIR bad" : "");
                if (!iRxIntType)
                        pr_debug("%s(): RxIRQ =0\n", __func__);

                if (iRxIntType & 0x10) {        // no error
                        if (via_ircc_dma_receive_complete(self, iobase)) {
//F01                           if (!(IsFIROn(iobase))) via_ircc_dma_receive(self);
                                via_ircc_dma_receive(self);
                        }
                } else {        // error
                        pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
                                 __func__, iRxIntType, iHostIntType,
                                 RxCurCount(iobase, self), self->RxLastCount);

                        if (iRxIntType & 0x20) {        // FIFO overrun error
                                ResetChip(iobase, 0);
                                ResetChip(iobase, 1);
                        } else {        // PHY or CRC error
                                if (iRxIntType != 0x08)
                                        hwreset(self);  // F01
                        }
                        via_ircc_dma_receive(self);
                }               // error
        }                       // Rx Event
        spin_unlock(&self->lock);
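        /* IRQ_RETVAL() maps a nonzero host status to IRQ_HANDLED, so an
         * all-zero (spurious) interrupt is reported as IRQ_NONE. */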
        return IRQ_RETVAL(iHostIntType);
}

static void hwreset(struct via_ircc_cb *self)
{
        int iobase;
        iobase = self->io.fir_base;

        ResetChip(iobase, 5);
        EnableDMA(iobase, OFF);
        EnableTX(iobase, OFF);
        EnableRX(iobase, OFF);
        EnRXDMA(iobase, OFF);
        EnTXDMA(iobase, OFF);
        RXStart(iobase, OFF);
        TXStart(iobase, OFF);
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);
        SetBaudRate(iobase, 9600);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x80);

        /* Restore speed. */
        via_ircc_change_speed(self, self->io.speed);

        self->st_fifo.len = 0;
}

/*
 * Function via_ircc_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int via_ircc_is_receiving(struct via_ircc_cb *self)
{
        int status = FALSE;
        int iobase;

        IRDA_ASSERT(self != NULL, return FALSE;);

        iobase = self->io.fir_base;
        if (CkRxRecv(iobase, self))
                status = TRUE;

        pr_debug("%s(): status=%x....\n", __func__, status);

        return status;
}


/*
 * Function via_ircc_net_open (dev)
 *
 *    Start the device
 *
 */
static int via_ircc_net_open(struct net_device *dev)
{
        struct via_ircc_cb *self;
        int iobase;
        char hwname[32];

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        dev->stats.rx_packets = 0;
        IRDA_ASSERT(self != NULL, return 0;);
        iobase = self->io.fir_base;
        if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
                net_warn_ratelimited("%s, unable to allocate irq=%d\n",
                                     driver_name, self->io.irq);
                return -EAGAIN;
        }
        /*
         * Always allocate the DMA channel after the IRQ, and clean up on
         * failure.
         */
        if (request_dma(self->io.dma, dev->name)) {
                net_warn_ratelimited("%s, unable to allocate dma=%d\n",
                                     driver_name, self->io.dma);
                free_irq(self->io.irq, dev);
                return -EAGAIN;
        }
        if (self->io.dma2 != self->io.dma) {
                if (request_dma(self->io.dma2, dev->name)) {
                        net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
                                             driver_name, self->io.dma2);
                        free_irq(self->io.irq, dev);
                        free_dma(self->io.dma);
                        return -EAGAIN;
                }
        }


        /* turn on interrupts */
        EnAllInt(iobase, ON);
        EnInternalLoop(iobase, OFF);
        EnExternalLoop(iobase, OFF);

        /* Arm the receiver */
        via_ircc_dma_receive(self);

        /* Ready to play! */
        netif_start_queue(dev);

        /*
         * Open new IrLAP layer instance, now that everything should be
         * initialized properly
         */
        sprintf(hwname, "VIA @ 0x%x", iobase);
        self->irlap = irlap_open(dev, &self->qos, hwname);

        self->RxLastCount = 0;

        return 0;
}

/*
 * Function via_ircc_net_close (dev)
 *
 *    Stop the device
 *
 */
static int via_ircc_net_close(struct net_device *dev)
{
        struct via_ircc_cb *self;
        int iobase;

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return 0;);

        /* Stop device */
        netif_stop_queue(dev);
        /* Stop and remove instance of IrLAP */
        if (self->irlap)
                irlap_close(self->irlap);
        self->irlap = NULL;
        iobase = self->io.fir_base;
        EnTXDMA(iobase, OFF);
        EnRXDMA(iobase, OFF);
        DisableDmaChannel(self->io.dma);

        /* Disable interrupts */
        EnAllInt(iobase, OFF);
        free_irq(self->io.irq, dev);
        free_dma(self->io.dma);
        if (self->io.dma2 != self->io.dma)
                free_dma(self->io.dma2);

        return 0;
}

/*
 * Function via_ircc_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
                              int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *) rq;
        struct via_ircc_cb *self;
        unsigned long flags;
        int ret = 0;

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return -1;);
        pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
        /* Disable interrupts & save flags */
        spin_lock_irqsave(&self->lock, flags);
        switch (cmd) {
        case SIOCSBANDWIDTH:    /* Set bandwidth */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                via_ircc_change_speed(self, irq->ifr_baudrate);
                break;
        case SIOCSMEDIABUSY:    /* Set media busy */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                irda_device_set_media_busy(self->netdev, TRUE);
                break;
        case SIOCGRECEIVING:    /* Check if we are receiving right now */
                irq->ifr_receiving = via_ircc_is_receiving(self);
                break;
        default:
                ret = -EOPNOTSUPP;
        }
out:
        spin_unlock_irqrestore(&self->lock, flags);
        return ret;
}

MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);