/********************************************************************
 Filename:      via-ircc.c
 Version:       1.0
 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
 Author:        VIA Technologies, Inc.
 Date:          08/06/2003

Copyright (c) 1998-2003 VIA Technologies, Inc.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

F01 Oct/02/02: Modify code for V0.11 (move out back-to-back transfer).
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
Comment:
       jul/09/2002 : only two kinds of dongle are implemented so far.
       Oct/02/2002 : works on VT8231 and VT8233.
       Aug/06/2003 : changed driver format to a PCI driver.

2004-02-16: <sda@bdit.de>
- Removed unneeded 'legacy' PCI stuff.
- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
- On speed change from core, don't send SIR frame with new speed.
  Use current speed and change speeds later.
- Make module-param dongle_id actually work.
- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
  Tested with home-grown PCB on EPIA boards.
- Code cleanup.

 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <linux/pm.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "via-ircc.h"

#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07;         /* 1 ms or more */
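/* For context (per the IrDA QoS bit encoding used by net/irda/qos.c): bits
 * 0-2 of the minimum-turnaround-time field select 10 ms, 5 ms and 1 ms
 * respectively, so 0x07 advertises every turnaround of 1 ms or longer,
 * which is what the "1 ms or more" comment above refers to. */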
static int dongle_id = 0;               /* default: probe */

/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);
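/* A hypothetical example invocation: a Temic TFDS4500 dongle (ID 0x11, see
 * the header changelog) would be selected at load time with
 *
 *      modprobe via-ircc dongle_id=0x11
 *
 * See via_ircc_change_dongle_speed() below for the IDs this driver knows. */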

/* FIXME : we should not need this, because instances should be automatically
 * managed by the PCI layer. Especially that we seem to only be using the
 * first entry. Jean II */
/* Max 4 instances for now */
static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };

/* Some prototypes */
static int via_ircc_open(int i, chipio_t *info, unsigned int id);
static int via_ircc_close(struct via_ircc_cb *self);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
                                         int iobase);
static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
                                  struct net_device *dev);
static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
                                  struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
                              int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
                                         int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int __devinit via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void __devexit via_remove_one(struct pci_dev *pdev);

/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
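/* Reading I/O port 0x80 (the POST diagnostic port) is a classic x86 bus-wait
 * trick: each inb() costs roughly one microsecond on an ISA-speed bus, so
 * the loop below approximates a microsecond delay without any calibration.
 * That is also why the FIXME above suggests plain udelay() instead. */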
static void iodelay(int udelay)
{
        u8 data;
        int i;

        for (i = 0; i < udelay; i++) {
                data = inb(0x80);
        }
}

static struct pci_device_id via_pci_tbl[] = {
        { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
        { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, via_pci_tbl);

static struct pci_driver via_driver = {
        .name           = VIA_MODULE_NAME,
        .id_table       = via_pci_tbl,
        .probe          = via_init_one,
        .remove         = __devexit_p(via_remove_one),
};

/*
 * Function via_ircc_init ()
 *
 *    Initialize chip. Just find out chip type and resource.
 */
static int __init via_ircc_init(void)
{
        int rc;

        IRDA_DEBUG(3, "%s()\n", __func__);

        rc = pci_register_driver(&via_driver);
        if (rc < 0) {
                IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
                           __func__, rc);
                return -ENODEV;
        }
        return 0;
}

static int __devinit via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
        int rc;
        u8 temp, oldPCI_40, oldPCI_44, bTmp, bTmp1;
        u16 Chipset, FirDRQ1, FirDRQ0, FirIRQ, FirIOBase;
        chipio_t info;

        IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);

        rc = pci_enable_device(pcidev);
        if (rc) {
                IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
                return -ENODEV;
        }

        /* Identify the south bridge */
        if (ReadLPCReg(0x20) != 0x3C)
                Chipset = 0x3096;
        else
                Chipset = 0x3076;

        if (Chipset == 0x3076) {
                IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);

                WriteLPCReg(7, 0x0c);
                temp = ReadLPCReg(0x30);        /* check if BIOS enabled FIR */
                if ((temp & 0x01) == 1) {       /* BIOS disabled or no FIR */
                        WriteLPCReg(0x1d, 0x82);
                        WriteLPCReg(0x23, 0x18);
                        temp = ReadLPCReg(0xF0);
                        if ((temp & 0x01) == 0) {
                                temp = ReadLPCReg(0x74) & 0x03;         /* DMA */
                                FirDRQ0 = temp + 4;
                                temp = (ReadLPCReg(0x74) & 0x0C) >> 2;
                                FirDRQ1 = temp + 4;
                        } else {
                                temp = (ReadLPCReg(0x74) & 0x0C) >> 2;  /* DMA */
                                FirDRQ0 = temp + 4;
                                FirDRQ1 = FirDRQ0;
                        }
                        FirIRQ = ReadLPCReg(0x70) & 0x0f;               /* IRQ */
                        FirIOBase = ReadLPCReg(0x60) << 8;      /* I/O space: high byte */
                        FirIOBase = FirIOBase | ReadLPCReg(0x61);       /* low byte */
                        info.fir_base = FirIOBase;
                        info.irq = FirIRQ;
                        info.dma = FirDRQ1;
                        info.dma2 = FirDRQ0;
                        pci_read_config_byte(pcidev, 0x40, &bTmp);
                        pci_write_config_byte(pcidev, 0x40, ((bTmp | 0x08) & 0xfe));
                        pci_read_config_byte(pcidev, 0x42, &bTmp);
                        pci_write_config_byte(pcidev, 0x42, (bTmp | 0xf0));
                        pci_write_config_byte(pcidev, 0x5a, 0xc0);
                        WriteLPCReg(0x28, 0x70);
                        if (via_ircc_open(0, &info, 0x3076) == 0)
                                rc = 0;
                } else
                        rc = -ENODEV;   /* IR not turned on */
        } else {        /* Not VT1211 */
                IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);

                pci_read_config_byte(pcidev, 0x67, &bTmp);      /* check if BIOS enabled FIR */
                if ((bTmp & 0x01) == 1) {       /* BIOS enabled FIR */
                        /* Enable double DMA clock */
                        pci_read_config_byte(pcidev, 0x42, &oldPCI_40);
                        pci_write_config_byte(pcidev, 0x42, oldPCI_40 | 0x80);
                        pci_read_config_byte(pcidev, 0x40, &oldPCI_40);
                        pci_write_config_byte(pcidev, 0x40, oldPCI_40 & 0xf7);
                        pci_read_config_byte(pcidev, 0x44, &oldPCI_44);
                        pci_write_config_byte(pcidev, 0x44, 0x4e);
                        /* Read configuration from function 0 of the south bridge */
                        if ((bTmp & 0x02) == 0) {
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);     /* DMA */
                                FirDRQ0 = (bTmp1 & 0x30) >> 4;
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);
                                FirDRQ1 = (bTmp1 & 0xc0) >> 6;
                        } else {
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);     /* DMA */
                                FirDRQ0 = (bTmp1 & 0x30) >> 4;
                                FirDRQ1 = 0;
                        }
                        pci_read_config_byte(pcidev, 0x47, &bTmp1);     /* IRQ */
                        FirIRQ = bTmp1 & 0x0f;

                        pci_read_config_byte(pcidev, 0x69, &bTmp);
                        FirIOBase = bTmp << 8;  /* high byte */
                        pci_read_config_byte(pcidev, 0x68, &bTmp);
                        FirIOBase = (FirIOBase | bTmp) & 0xfff0;

                        info.fir_base = FirIOBase;
                        info.irq = FirIRQ;
                        info.dma = FirDRQ1;
                        info.dma2 = FirDRQ0;
                        if (via_ircc_open(0, &info, 0x3096) == 0)
                                rc = 0;
                } else
                        rc = -ENODEV;   /* IR not turned on */
        }       /* Not VT1211 */

        IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
        return rc;
}

/*
 * Function via_ircc_clean ()
 *
 *    Close all configured chips
 *
 */
static void via_ircc_clean(void)
{
        int i;

        IRDA_DEBUG(3, "%s()\n", __func__);

        for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
                if (dev_self[i])
                        via_ircc_close(dev_self[i]);
        }
}

static void __devexit via_remove_one(struct pci_dev *pdev)
{
        IRDA_DEBUG(3, "%s()\n", __func__);

        /* FIXME : This is ugly. We should use pci_get_drvdata(pdev)
         * to get our driver instance and call via_ircc_close() directly.
         * See vlsi_ir for details...
         * Jean II */
        via_ircc_clean();

        /* FIXME : This should be in via_ircc_close(), because here we may
         * theoretically disable still-configured devices :-( - Jean II */
        pci_disable_device(pdev);
}

static void __exit via_ircc_cleanup(void)
{
        IRDA_DEBUG(3, "%s()\n", __func__);

        /* FIXME : This should be redundant, as pci_unregister_driver()
         * should call via_remove_one() on each device.
         * Jean II */
        via_ircc_clean();

        /* Cleanup all instances of the driver */
        pci_unregister_driver(&via_driver);
}

/*
 * Function via_ircc_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static __devinit int via_ircc_open(int i, chipio_t *info, unsigned int id)
{
        struct net_device *dev;
        struct via_ircc_cb *self;
        int err;

        IRDA_DEBUG(3, "%s()\n", __func__);

        if (i >= ARRAY_SIZE(dev_self))
                return -ENOMEM;

        /* Allocate new instance of the driver */
        dev = alloc_irdadev(sizeof(struct via_ircc_cb));
        if (dev == NULL)
                return -ENOMEM;

        self = netdev_priv(dev);
        self->netdev = dev;
        spin_lock_init(&self->lock);

        /* FIXME : We should store our driver instance in the PCI layer,
         * using pci_set_drvdata(), not in this array.
         * See vlsi_ir for details... - Jean II */
        /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
        /* Need to store self somewhere */
        dev_self[i] = self;
        self->index = i;
        /* Initialize resources */
        self->io.cfg_base = info->cfg_base;
        self->io.fir_base = info->fir_base;
        self->io.irq = info->irq;
        self->io.fir_ext = CHIP_IO_EXTENT;
        self->io.dma = info->dma;
        self->io.dma2 = info->dma2;
        self->io.fifo_size = 32;
        self->chip_id = id;
        self->st_fifo.len = 0;
        self->RxDataReady = 0;

        /* Reserve the ioports that we need */
        if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
                IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
                           __func__, self->io.fir_base);
                err = -ENODEV;
                goto err_out1;
        }

        /* Initialize QoS for this device */
        irda_init_max_qos_capabilies(&self->qos);

        /* Check if user has supplied the dongle id or not */
        if (!dongle_id)
                dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
        self->io.dongle_id = dongle_id;

        /* The only value we must override is the baudrate. */
        /* Maximum speeds and capabilities are dongle-dependent. */
        switch (self->io.dongle_id) {
        case 0x0d:
                self->qos.baud_rate.bits =
                    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
                    IR_576000 | IR_1152000 | (IR_4000000 << 8);
                break;
        default:
                self->qos.baud_rate.bits =
                    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
                break;
        }

        /* Following was used for testing:
         *
         *   self->qos.baud_rate.bits = IR_9600;
         *
         * It is no good, as it prohibits (error-prone) speed changes.
         */

        self->qos.min_turn_time.bits = qos_mtt_bits;
        irda_qos_bits_to_value(&self->qos);

        /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
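        /* A worked example, assuming a window of 7 frames of up to 2048 data
         * bytes each (the usual IrLAP maximums): (2048 + 6) * 7 + 6 = 14384,
         * which matches the figure below; the extra 2048 is slack. */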
        self->rx_buff.truesize = 14384 + 2048;
        self->tx_buff.truesize = 14384 + 2048;

        /* Allocate memory if needed */
        self->rx_buff.head =
                dma_alloc_coherent(NULL, self->rx_buff.truesize,
                                   &self->rx_buff_dma, GFP_KERNEL);
        if (self->rx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out2;
        }
        memset(self->rx_buff.head, 0, self->rx_buff.truesize);

        self->tx_buff.head =
                dma_alloc_coherent(NULL, self->tx_buff.truesize,
                                   &self->tx_buff_dma, GFP_KERNEL);
        if (self->tx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out3;
        }
        memset(self->tx_buff.head, 0, self->tx_buff.truesize);

        self->rx_buff.in_frame = FALSE;
        self->rx_buff.state = OUTSIDE_FRAME;
        self->tx_buff.data = self->tx_buff.head;
        self->rx_buff.data = self->rx_buff.head;

        /* Reset Tx queue info */
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;

        /* Override the network functions we need to use */
        dev->hard_start_xmit = via_ircc_hard_xmit_sir;
        dev->open = via_ircc_net_open;
        dev->stop = via_ircc_net_close;
        dev->do_ioctl = via_ircc_net_ioctl;

        err = register_netdev(dev);
        if (err)
                goto err_out4;

        IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

        /* Initialise the hardware */
        self->io.speed = 9600;
        via_hw_init(self);
        return 0;
err_out4:
        dma_free_coherent(NULL, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
err_out3:
        dma_free_coherent(NULL, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
err_out2:
        release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
        free_netdev(dev);
        dev_self[i] = NULL;
        return err;
}

/*
 * Function via_ircc_close (self)
 *
 *    Close driver instance
 *
 */
static int via_ircc_close(struct via_ircc_cb *self)
{
        int iobase;

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return -1;);

        iobase = self->io.fir_base;

        ResetChip(iobase, 5);   /* hardware reset */
        /* Remove netdevice */
        unregister_netdev(self->netdev);

        /* Release the PORT that this driver is using */
        IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
                   __func__, self->io.fir_base);
        release_region(self->io.fir_base, self->io.fir_ext);
        if (self->tx_buff.head)
                dma_free_coherent(NULL, self->tx_buff.truesize,
                                  self->tx_buff.head, self->tx_buff_dma);
        if (self->rx_buff.head)
                dma_free_coherent(NULL, self->rx_buff.truesize,
                                  self->rx_buff.head, self->rx_buff_dma);
        dev_self[self->index] = NULL;

        free_netdev(self->netdev);

        return 0;
}

/*
 * Function via_hw_init(self)
 *
 *    Set the chip to a sane default SIR state.
 *
 *    Formerly via_ircc_setup
 */
static void via_hw_init(struct via_ircc_cb *self)
{
        int iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s()\n", __func__);

        SetMaxRxPacketSize(iobase, 0x0fff);     /* set to max: 4095 */
        /* FIFO Init */
        EnRXFIFOReadyInt(iobase, OFF);
        EnRXFIFOHalfLevelInt(iobase, OFF);
        EnTXFIFOHalfLevelInt(iobase, OFF);
        EnTXFIFOUnderrunEOMInt(iobase, ON);
        EnTXFIFOReadyInt(iobase, OFF);
        InvertTX(iobase, OFF);
        InvertRX(iobase, OFF);

        if (ReadLPCReg(0x20) == 0x3c)
                WriteLPCReg(0xF0, 0);   /* for VT1211 */
        /* Int Init */
        EnRXSpecInt(iobase, ON);

        /* The following is basically hwreset */
        /* If this is the case, why not just call hwreset() ? Jean II */
        ResetChip(iobase, 5);
        EnableDMA(iobase, OFF);
        EnableTX(iobase, OFF);
        EnableRX(iobase, OFF);
        EnRXDMA(iobase, OFF);
        EnTXDMA(iobase, OFF);
        RXStart(iobase, OFF);
        TXStart(iobase, OFF);
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);
        SetBaudRate(iobase, 9600);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);

        self->io.speed = 9600;
        self->st_fifo.len = 0;

        via_ircc_change_dongle_speed(iobase, self->io.speed,
                                     self->io.dongle_id);

        WriteReg(iobase, I_ST_CT_0, 0x80);
}

/*
 * Function via_ircc_read_dongle_id (void)
 *
 */
static int via_ircc_read_dongle_id(int iobase)
{
        int dongle_id = 9;      /* Default to IBM */

        IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
        return dongle_id;
}

/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change the speed of the attached dongle.
 *    Only a few dongle types are implemented so far.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
                                         int dongle_id)
{
        u8 mode = 0;

        /* speed is unused, as we use IsSIROn()/IsMIROn() */
        speed = speed;

        IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
                   __func__, speed, iobase, dongle_id);

        switch (dongle_id) {

        /* Note: The dongle_id's listed here are derived from
         * nsc-ircc.c */

        case 0x08:      /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
                UseOneRX(iobase, ON);   /* use one RX pin RX1,RX2 */
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);

                EnRX2(iobase, ON);      /* sir to rx2 */
                EnGPIOtoRX2(iobase, OFF);

                if (IsSIROn(iobase)) {  /* sir */
                        /* Mode select Off */
                        SlowIRRXLowActive(iobase, ON);
                        udelay(1000);
                        SlowIRRXLowActive(iobase, OFF);
                } else {
                        if (IsMIROn(iobase)) {  /* mir */
                                /* Mode select On */
                                SlowIRRXLowActive(iobase, OFF);
                                udelay(20);
                        } else {        /* fir */
                                if (IsFIROn(iobase)) {  /* fir */
                                        /* Mode select On */
                                        SlowIRRXLowActive(iobase, OFF);
                                        udelay(20);
                                }
                        }
                }
                break;

        case 0x09:      /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
                UseOneRX(iobase, ON);   /* use ONE RX....RX1 */
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);  /* invert RX pin */

                EnRX2(iobase, ON);
                EnGPIOtoRX2(iobase, OFF);
                if (IsSIROn(iobase)) {  /* sir */
                        /* Mode select On */
                        SlowIRRXLowActive(iobase, ON);
                        udelay(20);
                        /* Mode select Off */
                        SlowIRRXLowActive(iobase, OFF);
                }
                if (IsMIROn(iobase)) {  /* mir */
                        /* Mode select On */
                        SlowIRRXLowActive(iobase, OFF);
                        udelay(20);
                        /* Mode select Off */
                        SlowIRRXLowActive(iobase, ON);
                } else {        /* fir */
                        if (IsFIROn(iobase)) {  /* fir */
                                /* Mode select On */
                                SlowIRRXLowActive(iobase, OFF);
                                /* TX On */
                                WriteTX(iobase, ON);
                                udelay(20);
                                /* Mode select OFF */
                                SlowIRRXLowActive(iobase, ON);
                                udelay(20);
                                /* TX Off */
                                WriteTX(iobase, OFF);
                        }
                }
                break;

        case 0x0d:
                UseOneRX(iobase, OFF);  /* use two RX pins RX1,RX2 */
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);
                SlowIRRXLowActive(iobase, OFF);
                if (IsSIROn(iobase)) {  /* sir */
                        EnGPIOtoRX2(iobase, OFF);
                        WriteGIO(iobase, OFF);
                        EnRX2(iobase, OFF);     /* sir to rx2 */
                } else {        /* fir mir */
                        EnGPIOtoRX2(iobase, OFF);
                        WriteGIO(iobase, OFF);
                        EnRX2(iobase, OFF);     /* fir to rx */
                }
                break;

        case 0x11:      /* Temic TFDS4500 */

                IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

                UseOneRX(iobase, ON);   /* use ONE RX....RX1 */
                InvertTX(iobase, OFF);
                InvertRX(iobase, ON);   /* invert RX pin */

                EnRX2(iobase, ON);      /* sir to rx2 */
                EnGPIOtoRX2(iobase, OFF);

                if (IsSIROn(iobase)) {  /* sir */
                        /* Mode select On */
                        SlowIRRXLowActive(iobase, ON);
                        udelay(20);
                        /* Mode select Off */
                        SlowIRRXLowActive(iobase, OFF);
                } else {
                        IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
                }
                break;

        case 0x0ff:     /* Vishay */
                if (IsSIROn(iobase))
                        mode = 0;
                else if (IsMIROn(iobase))
                        mode = 1;
                else if (IsFIROn(iobase))
                        mode = 2;
                else if (IsVFIROn(iobase))
                        mode = 5;       /* VFIR-16 */
                SI_SetMode(iobase, mode);
                break;

        default:
                IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
                           __func__, dongle_id);
        }
}

/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
        struct net_device *dev = self->netdev;
        u16 iobase;
        u8 value = 0, bTmp;

        iobase = self->io.fir_base;
        /* Update accounting for new speed */
        self->io.speed = speed;
        IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

        WriteReg(iobase, I_ST_CT_0, 0x0);

        /* Controller mode selection */
        switch (speed) {
        case 2400:
        case 9600:
        case 19200:
        case 38400:
        case 57600:
        case 115200:
                value = (115200 / speed) - 1;
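                /* The SIR divisor just computed is derived from a 115200 bps
                 * base rate; e.g. 9600 bps gives 115200 / 9600 - 1 = 11.
                 * It is programmed into the chip further below. */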
                SetSIR(iobase, ON);
                CRC16(iobase, ON);
                break;
        case 576000:
                /* FIXME: this can't be right, as it's the same as 115200,
                 * and 576000 is MIR, not SIR. */
                value = 0;
                SetSIR(iobase, ON);
                CRC16(iobase, ON);
                break;
        case 1152000:
                value = 0;
                SetMIR(iobase, ON);
                /* FIXME: CRC ??? */
                break;
        case 4000000:
                value = 0;
                SetFIR(iobase, ON);
                SetPulseWidth(iobase, 0);
                SetSendPreambleCount(iobase, 14);
                CRC16(iobase, OFF);
                EnTXCRC(iobase, ON);
                break;
        case 16000000:
                value = 0;
                SetVFIR(iobase, ON);
                /* FIXME: CRC ??? */
                break;
        default:
                value = 0;
                break;
        }

        /* Set baudrate to 0x19[2..7] */
        bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
        bTmp |= value << 2;
        WriteReg(iobase, I_CF_H_1, bTmp);
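        /* To illustrate the register layout implied by the comment above:
         * the divisor occupies bits 7:2 of register 0x19 (I_CF_H_1), so for
         * 9600 bps (value = 11) the bits OR'ed in are 11 << 2 = 0x2c, while
         * bits 1:0 keep their previous contents. */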

        /* Some dongles may need to be informed about speed changes. */
        via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

        /* Set FIFO size to 64 */
        SetFIFO(iobase, 64);

        /* Enable IR */
        WriteReg(iobase, I_ST_CT_0, 0x80);

        /* EnTXFIFOHalfLevelInt(iobase, ON); */

        /* Enable some interrupts so we can receive frames */
        /* EnAllInt(iobase, ON); */

        if (IsSIROn(iobase)) {
                SIRFilter(iobase, ON);
                SIRRecvAny(iobase, ON);
        } else {
                SIRFilter(iobase, OFF);
                SIRRecvAny(iobase, OFF);
        }

        if (speed > 115200) {
                /* Install FIR xmit handler */
                dev->hard_start_xmit = via_ircc_hard_xmit_fir;
                via_ircc_dma_receive(self);
        } else {
                /* Install SIR xmit handler */
                dev->hard_start_xmit = via_ircc_hard_xmit_sir;
        }
        netif_wake_queue(dev);
}

/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 */
static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
                                  struct net_device *dev)
{
        struct via_ircc_cb *self;
        unsigned long flags;
        u16 iobase;
        __u32 speed;

        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return 0;);
        iobase = self->io.fir_base;

        netif_stop_queue(dev);
        /* Check if we need to change the speed */
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                /* Check for empty frame */
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return 0;
                } else
                        self->new_speed = speed;
        }
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);

        spin_lock_irqsave(&self->lock, flags);
        self->tx_buff.data = self->tx_buff.head;
        self->tx_buff.len =
            async_wrap_skb(skb, self->tx_buff.data,
                           self->tx_buff.truesize);
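        /* For context (standard IrDA SIR framing, see net/irda/wrapper.c):
         * async_wrap_skb() byte-stuffs the payload and brackets it with
         * BOF/EOF flag bytes, producing a buffer the chip can stream out
         * via DMA at UART-style SIR speeds. */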

        dev->stats.tx_bytes += self->tx_buff.len;
        /* Send this frame with old speed */
        SetBaudRate(iobase, self->io.speed);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x80);

        EnableTX(iobase, ON);
        EnableRX(iobase, OFF);

        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);

        EnAllInt(iobase, ON);
        EnTXDMA(iobase, ON);
        EnRXDMA(iobase, OFF);

        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_TX_MODE);

        SetSendByte(iobase, self->tx_buff.len);
        RXStart(iobase, OFF);
        TXStart(iobase, ON);

        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&self->lock, flags);
        dev_kfree_skb(skb);
        return 0;
}

static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
                                  struct net_device *dev)
{
        struct via_ircc_cb *self;
        u16 iobase;
        __u32 speed;
        unsigned long flags;

        self = netdev_priv(dev);
        iobase = self->io.fir_base;

        if (self->st_fifo.len)
                return 0;
        if (self->chip_id == 0x3076)
                iodelay(1500);
        else
                udelay(1500);
        netif_stop_queue(dev);
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return 0;
                } else
                        self->new_speed = speed;
        }
        spin_lock_irqsave(&self->lock, flags);
        self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
        self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

        self->tx_fifo.tail += skb->len;
        dev->stats.tx_bytes += skb->len;
        skb_copy_from_linear_data(skb,
                self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
        self->tx_fifo.len++;
        self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
        via_ircc_dma_xmit(self, iobase);
//F01   }
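        /* Note: the F01 lines above and below are remnants of the
         * back-to-back transfer support that was moved out in V0.11 (see
         * the F01 entry in the file header), so each frame is now kicked
         * off immediately rather than batched in the tx_fifo window. */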
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW - 1)) netif_wake_queue(self->netdev);
        dev->trans_start = jiffies;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&self->lock, flags);
        return 0;
}

static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
        EnTXDMA(iobase, OFF);
        self->io.direction = IO_XMIT;
        EnPhys(iobase, ON);
        EnableTX(iobase, ON);
        EnableRX(iobase, OFF);
        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);
        EnAllInt(iobase, ON);
        EnTXDMA(iobase, ON);
        EnRXDMA(iobase, OFF);
        irda_setup_dma(self->io.dma,
                       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
                        self->tx_buff.head) + self->tx_buff_dma,
                       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
        IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
                   __func__, self->tx_fifo.ptr,
                   self->tx_fifo.queue[self->tx_fifo.ptr].len,
                   self->tx_fifo.len);

        SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
        RXStart(iobase, OFF);
        TXStart(iobase, ON);
        return 0;
}

/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be
 *    called by the interrupt handler.
 *
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
        int iobase;
        int ret = TRUE;
        u8 Tx_status;

        IRDA_DEBUG(3, "%s()\n", __func__);

        iobase = self->io.fir_base;
        /* Disable DMA */
//      DisableDmaChannel(self->io.dma);
        /* Check for underrun! */
        /* Clear bit, by writing 1 into it */
        Tx_status = GetTXStatus(iobase);
        if (Tx_status & 0x08) {
                self->netdev->stats.tx_errors++;
                self->netdev->stats.tx_fifo_errors++;
                hwreset(self);
                /* how to clear underrun? */
        } else {
                self->netdev->stats.tx_packets++;
                ResetChip(iobase, 3);
                ResetChip(iobase, 4);
        }
        /* Check if we need to change the speed */
        if (self->new_speed) {
                via_ircc_change_speed(self, self->new_speed);
                self->new_speed = 0;
        }

        /* Finished with this frame, so prepare for next */
        if (IsFIROn(iobase)) {
                if (self->tx_fifo.len) {
                        self->tx_fifo.len--;
                        self->tx_fifo.ptr++;
                }
        }
        IRDA_DEBUG(1,
                   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
                   __func__,
                   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
        // Any frames to be sent back-to-back?
        if (self->tx_fifo.len) {
                // Not finished yet!
                via_ircc_dma_xmit(self, iobase);
                ret = FALSE;
        } else {
F01_E */
        /* Reset Tx FIFO info */
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
//F01   }

        /* Make sure we have room for more frames */
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW - 1)) {
        /* Not busy transmitting anymore */
        /* Tell the network layer that we can accept more frames */
        netif_wake_queue(self->netdev);
//F01   }
        return ret;
}

/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set up the configuration for receiving a frame.
 *
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
        int iobase;

        iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s()\n", __func__);

        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
        self->RxDataReady = 0;
        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;
        self->st_fifo.len = self->st_fifo.pending_bytes = 0;
        self->st_fifo.tail = self->st_fifo.head = 0;

        EnPhys(iobase, ON);
        EnableTX(iobase, OFF);
        EnableRX(iobase, ON);

        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);

        EnAllInt(iobase, ON);
        EnTXDMA(iobase, OFF);
        EnRXDMA(iobase, ON);
        irda_setup_dma(self->io.dma2, self->rx_buff_dma,
                       self->rx_buff.truesize, DMA_RX_MODE);
        TXStart(iobase, OFF);
        RXStart(iobase, ON);

        return 0;
}

/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    The controller has finished receiving frames;
 *    this routine is called by the ISR.
 *
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
                                         int iobase)
{
        struct st_fifo *st_fifo;
        struct sk_buff *skb;
        int len, i;
        u8 status = 0;

        iobase = self->io.fir_base;
        st_fifo = &self->st_fifo;

        if (self->io.speed < 4000000) { /* Speed below FIR */
                len = GetRecvByte(iobase, self);
                skb = dev_alloc_skb(len + 1);
                if (skb == NULL)
                        return FALSE;
                /* Make sure IP header gets aligned */
                skb_reserve(skb, 1);
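                /* For context: SIR/MIR frames carry a 2-byte CRC-16 trailer,
                 * so only len - 2 bytes of payload are handed upstream (the
                 * FIR path below strips a 4-byte CRC-32 instead). */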
                skb_put(skb, len - 2);
                if (self->chip_id == 0x3076) {
                        for (i = 0; i < len - 2; i++)
                                skb->data[i] = self->rx_buff.data[i * 2];
                } else {
                        if (self->chip_id == 0x3096) {
                                for (i = 0; i < len - 2; i++)
                                        skb->data[i] =
                                            self->rx_buff.data[i];
                        }
                }
                /* Move to next frame */
                self->rx_buff.data += len;
                self->netdev->stats.rx_bytes += len;
                self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
                return TRUE;
        } else {        /* FIR mode */
                len = GetRecvByte(iobase, self);
                if (len == 0)
                        return TRUE;    /* interrupt only; data may have been moved by RxTimerHandler */
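                /* Sanity-check the length: after stripping the 4-byte FIR
                 * trailer the payload must lie in a 2..2048 byte window;
                 * anything else means the DMA counters are out of sync and
                 * the safest recovery is a hardware reset. */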
                if (((len - 4) < 2) || ((len - 4) > 2048)) {
                        IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
                                   __func__, len, RxCurCount(iobase, self),
                                   self->RxLastCount);
                        hwreset(self);
                        return FALSE;
                }
                IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
                           __func__,
                           st_fifo->len, len - 4, RxCurCount(iobase, self));

                st_fifo->entries[st_fifo->tail].status = status;
                st_fifo->entries[st_fifo->tail].len = len;
                st_fifo->pending_bytes += len;
                st_fifo->tail++;
                st_fifo->len++;
                if (st_fifo->tail > MAX_RX_WINDOW)
                        st_fifo->tail = 0;
                self->RxDataReady = 0;

                /* Up to MAX_RX_WINDOW frames may have been received by
                 * receive_complete before the timer IRQ fires. */
/* F01_S
                if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
                        RXStart(iobase, ON);
                        SetTimer(iobase, 4);
                }
                else {
F01_E */
                EnableRX(iobase, OFF);
                EnRXDMA(iobase, OFF);
                RXStart(iobase, OFF);
//F01_S
                /* Pop this entry from the status fifo */
                if (st_fifo->head > MAX_RX_WINDOW)
                        st_fifo->head = 0;
                status = st_fifo->entries[st_fifo->head].status;
                len = st_fifo->entries[st_fifo->head].len;
                st_fifo->head++;
                st_fifo->len--;

                skb = dev_alloc_skb(len + 1 - 4);
                /*
                 * If the frame size, data pointer, or skb pointer is wrong,
                 * drop the frame and get the next entry.
                 */
                if ((skb == NULL) || (skb->data == NULL) ||
                    (self->rx_buff.data == NULL) || (len < 6)) {
                        self->netdev->stats.rx_dropped++;
                        return TRUE;
                }
                skb_reserve(skb, 1);
                skb_put(skb, len - 4);

                skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
                IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
                           len - 4, self->rx_buff.data);

                /* Move to next frame */
                self->rx_buff.data += len;
                self->netdev->stats.rx_bytes += len;
                self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);

//F01_E
        }       /* FIR */
        return TRUE;
}

/*
 * If a frame was received but no interrupt was raised, use this routine
 * to upload the frame.
 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
        struct sk_buff *skb;
        int len;
        struct st_fifo *st_fifo;
        st_fifo = &self->st_fifo;

        len = GetRecvByte(iobase, self);

        IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

        if ((len - 4) < 2) {
                self->netdev->stats.rx_dropped++;
                return FALSE;
        }

        skb = dev_alloc_skb(len + 1);
        if (skb == NULL) {
                self->netdev->stats.rx_dropped++;
                return FALSE;
        }
        skb_reserve(skb, 1);
        skb_put(skb, len - 4 + 1);
        skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
        st_fifo->tail++;
        st_fifo->len++;
        if (st_fifo->tail > MAX_RX_WINDOW)
                st_fifo->tail = 0;
        /* Move to next frame */
        self->rx_buff.data += len;
        self->netdev->stats.rx_bytes += len;
        self->netdev->stats.rx_packets++;
        skb->dev = self->netdev;
        skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
        if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
                RXStart(iobase, ON);
        } else {
                EnableRX(iobase, OFF);
                EnRXDMA(iobase, OFF);
                RXStart(iobase, OFF);
        }
        return TRUE;
}

/*
 * To implement back-to-back receive, use this routine to upload data.
 */
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
        struct st_fifo *st_fifo;
        struct sk_buff *skb;
        int len;
        u8 status;

        st_fifo = &self->st_fifo;

        if (CkRxRecv(iobase, self)) {
                /* Still receiving: return without uploading the frame */
                self->RetryCount = 0;
                SetTimer(iobase, 20);
                self->RxDataReady++;
                return FALSE;
        } else
                self->RetryCount++;

        if ((self->RetryCount >= 1) ||
            ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
            (st_fifo->len >= (MAX_RX_WINDOW))) {
                while (st_fifo->len > 0) {      /* upload frame */
                        /* Pop this entry from the status fifo */
                        if (st_fifo->head > MAX_RX_WINDOW)
                                st_fifo->head = 0;
                        status = st_fifo->entries[st_fifo->head].status;
                        len = st_fifo->entries[st_fifo->head].len;
                        st_fifo->head++;
                        st_fifo->len--;

                        skb = dev_alloc_skb(len + 1 - 4);
                        /*
                         * If the frame size, data pointer, or skb pointer
                         * is wrong, drop the frame and get the next entry.
                         */
                        if ((skb == NULL) || (skb->data == NULL) ||
                            (self->rx_buff.data == NULL) || (len < 6)) {
                                self->netdev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, 1);
                        skb_put(skb, len - 4);
                        skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

                        IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
                                   len - 4, st_fifo->head);

                        /* Move to next frame */
                        self->rx_buff.data += len;
                        self->netdev->stats.rx_bytes += len;
                        self->netdev->stats.rx_packets++;
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                }       /* while */
                self->RetryCount = 0;

                IRDA_DEBUG(2,
                           "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
                           __func__,
                           GetHostStatus(iobase), GetRXStatus(iobase));

                /*
                 * If a frame completed while we were in this routine,
                 * upload it as well.
                 */
                if ((GetRXStatus(iobase) & 0x10) &&
                    (RxCurCount(iobase, self) != self->RxLastCount)) {
                        upload_rxdata(self, iobase);
                        if (irda_device_txqueue_empty(self->netdev))
                                via_ircc_dma_receive(self);
                }
        }       /* timer detect complete */
        else
                SetTimer(iobase, 4);
        return TRUE;
}

/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work.
 *
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct via_ircc_cb *self = netdev_priv(dev);
        int iobase;
        u8 iHostIntType, iRxIntType, iTxIntType;

        iobase = self->io.fir_base;
        spin_lock(&self->lock);
        iHostIntType = GetHostStatus(iobase);

        IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
                   __func__, iHostIntType,
                   (iHostIntType & 0x40) ? "Timer" : "",
                   (iHostIntType & 0x20) ? "Tx" : "",
                   (iHostIntType & 0x10) ? "Rx" : "",
                   (iHostIntType & 0x0e) >> 1);

        if ((iHostIntType & 0x40) != 0) {       /* Timer Event */
                self->EventFlag.TimeOut++;
                ClearTimerInt(iobase, 1);
                if (self->io.direction == IO_XMIT) {
                        via_ircc_dma_xmit(self, iobase);
                }
                if (self->io.direction == IO_RECV) {
                        /*
                         * A ready frame has been held too long; we must
                         * reset.
                         */
                        if (self->RxDataReady > 30) {
                                hwreset(self);
                                if (irda_device_txqueue_empty(self->netdev)) {
                                        via_ircc_dma_receive(self);
                                }
                        } else {        /* call this to upload the frame */
                                RxTimerHandler(self, iobase);
                        }
                }       /* RECV */
        }       /* Timer Event */
        if ((iHostIntType & 0x20) != 0) {       /* Tx Event */
                iTxIntType = GetTXStatus(iobase);

                IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
                           __func__, iTxIntType,
                           (iTxIntType & 0x08) ? "FIFO underr." : "",
                           (iTxIntType & 0x04) ? "EOM" : "",
                           (iTxIntType & 0x02) ? "FIFO ready" : "",
                           (iTxIntType & 0x01) ? "Early EOM" : "");

                if (iTxIntType & 0x4) {
                        self->EventFlag.EOMessage++;    /* read and will auto clean */
                        if (via_ircc_dma_xmit_complete(self)) {
                                if (irda_device_txqueue_empty(self->netdev)) {
                                        via_ircc_dma_receive(self);
                                }
                        } else {
                                self->EventFlag.Unknown++;
                        }
                }       /* EOP */
        }       /* Tx Event */

        if ((iHostIntType & 0x10) != 0) {       /* Rx Event */
                /* Check if DMA has finished */
                iRxIntType = GetRXStatus(iobase);

                IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
                           __func__, iRxIntType,
                           (iRxIntType & 0x80) ? "PHY err." : "",
                           (iRxIntType & 0x40) ? "CRC err" : "",
                           (iRxIntType & 0x20) ? "FIFO overr." : "",
                           (iRxIntType & 0x10) ? "EOF" : "",
                           (iRxIntType & 0x08) ? "RxData" : "",
                           (iRxIntType & 0x02) ? "RxMaxLen" : "",
                           (iRxIntType & 0x01) ? "SIR bad" : "");
                if (!iRxIntType)
                        IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

                if (iRxIntType & 0x10) {        /* no error */
                        if (via_ircc_dma_receive_complete(self, iobase)) {
//F01                           if (!(IsFIROn(iobase))) via_ircc_dma_receive(self);
                                via_ircc_dma_receive(self);
                        }
                } else {        /* error */
                        IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
                                   __func__, iRxIntType, iHostIntType,
                                   RxCurCount(iobase, self),
                                   self->RxLastCount);

                        if (iRxIntType & 0x20) {        /* FIFO overrun error */
                                ResetChip(iobase, 0);
                                ResetChip(iobase, 1);
                        } else {        /* PHY or CRC error */
                                if (iRxIntType != 0x08)
                                        hwreset(self);  /* F01 */
                        }
                        via_ircc_dma_receive(self);
                }       /* error */
        }       /* Rx Event */
        spin_unlock(&self->lock);
        return IRQ_RETVAL(iHostIntType);
}

static void hwreset(struct via_ircc_cb *self)
{
        int iobase;
        iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s()\n", __func__);

        ResetChip(iobase, 5);
        EnableDMA(iobase, OFF);
        EnableTX(iobase, OFF);
        EnableRX(iobase, OFF);
        EnRXDMA(iobase, OFF);
        EnTXDMA(iobase, OFF);
        RXStart(iobase, OFF);
        TXStart(iobase, OFF);
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);
        SetBaudRate(iobase, 9600);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x80);

        /* Restore speed. */
        via_ircc_change_speed(self, self->io.speed);

        self->st_fifo.len = 0;
}

/*
 * Function via_ircc_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int via_ircc_is_receiving(struct via_ircc_cb *self)
{
        int status = FALSE;
        int iobase;

        IRDA_ASSERT(self != NULL, return FALSE;);

        iobase = self->io.fir_base;
        if (CkRxRecv(iobase, self))
                status = TRUE;

        IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);

        return status;
}

/*
 * Function via_ircc_net_open (dev)
 *
 *    Start the device
 *
 */
static int via_ircc_net_open(struct net_device *dev)
{
        struct via_ircc_cb *self;
        int iobase;
        char hwname[32];

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        dev->stats.rx_packets = 0;
        IRDA_ASSERT(self != NULL, return 0;);
        iobase = self->io.fir_base;
        if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
                IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
                             self->io.irq);
                return -EAGAIN;
        }
        /*
         * Always allocate the DMA channel after the IRQ, and clean up on
         * failure.
         */
        if (request_dma(self->io.dma, dev->name)) {
                IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
                             self->io.dma);
                free_irq(self->io.irq, dev);
                return -EAGAIN;
        }
        if (self->io.dma2 != self->io.dma) {
                if (request_dma(self->io.dma2, dev->name)) {
                        IRDA_WARNING("%s, unable to allocate dma2=%d\n",
                                     driver_name, self->io.dma2);
                        free_irq(self->io.irq, dev);
                        free_dma(self->io.dma);
                        return -EAGAIN;
                }
        }

        /* turn on interrupts */
        EnAllInt(iobase, ON);
        EnInternalLoop(iobase, OFF);
        EnExternalLoop(iobase, OFF);

        /* Arm the receiver */
        via_ircc_dma_receive(self);

        /* Ready to play! */
        netif_start_queue(dev);

        /*
         * Open new IrLAP layer instance, now that everything should be
         * initialized properly
         */
        sprintf(hwname, "VIA @ 0x%x", iobase);
        self->irlap = irlap_open(dev, &self->qos, hwname);

        self->RxLastCount = 0;

        return 0;
}

/*
 * Function via_ircc_net_close (dev)
 *
 *    Stop the device
 *
 */
static int via_ircc_net_close(struct net_device *dev)
{
        struct via_ircc_cb *self;
        int iobase;

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return 0;);

        /* Stop device */
        netif_stop_queue(dev);
        /* Stop and remove instance of IrLAP */
        if (self->irlap)
                irlap_close(self->irlap);
        self->irlap = NULL;
        iobase = self->io.fir_base;
        EnTXDMA(iobase, OFF);
        EnRXDMA(iobase, OFF);
        DisableDmaChannel(self->io.dma);

        /* Disable interrupts */
        EnAllInt(iobase, OFF);
        free_irq(self->io.irq, dev);
        free_dma(self->io.dma);
        if (self->io.dma2 != self->io.dma)
                free_dma(self->io.dma2);

        return 0;
}

/*
 * Function via_ircc_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
                              int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *) rq;
        struct via_ircc_cb *self;
        unsigned long flags;
        int ret = 0;

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return -1;);
        IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
                   cmd);
        /* Disable interrupts & save flags */
        spin_lock_irqsave(&self->lock, flags);
        switch (cmd) {
        case SIOCSBANDWIDTH:    /* Set bandwidth */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                via_ircc_change_speed(self, irq->ifr_baudrate);
                break;
        case SIOCSMEDIABUSY:    /* Set media busy */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                irda_device_set_media_busy(self->netdev, TRUE);
                break;
        case SIOCGRECEIVING:    /* Check if we are receiving right now */
                irq->ifr_receiving = via_ircc_is_receiving(self);
                break;
        default:
                ret = -EOPNOTSUPP;
        }
out:
        spin_unlock_irqrestore(&self->lock, flags);
        return ret;
}

MODULE_AUTHOR("VIA Technologies, Inc.");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);