]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/irda/via-ircc.c
Merge tag 'drm-intel-fixes-2013-11-20' of git://people.freedesktop.org/~danvet/drm...
[mirror_ubuntu-bionic-kernel.git] / drivers / net / irda / via-ircc.c
1 /********************************************************************
2 Filename: via-ircc.c
3 Version: 1.0
4 Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
5 Author: VIA Technologies,inc
6 Date : 08/06/2003
7
8 Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10 This program is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free Software
12 Foundation; either version 2, or (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 See the GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22
23 F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
24 F02 Oct/28/02: Add SB device ID for 3147 and 3177.
25 Comment :
26 jul/09/2002 : only implement two kind of dongle currently.
27 Oct/02/2002 : work on VT8231 and VT8233 .
28 Aug/06/2003 : change driver format to pci driver .
29
30 2004-02-16: <sda@bdit.de>
31 - Removed unneeded 'legacy' pci stuff.
32 - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
33 - On speed change from core, don't send SIR frame with new speed.
34 Use current speed and change speeds later.
35 - Make module-param dongle_id actually work.
36 - New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
37 Tested with home-grown PCB on EPIA boards.
38 - Code cleanup.
39
40 ********************************************************************/
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/types.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/ioport.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/interrupt.h>
50 #include <linux/rtnetlink.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/gfp.h>
54
55 #include <asm/io.h>
56 #include <asm/dma.h>
57 #include <asm/byteorder.h>
58
59 #include <linux/pm.h>
60
61 #include <net/irda/wrapper.h>
62 #include <net/irda/irda.h>
63 #include <net/irda/irda_device.h>
64
65 #include "via-ircc.h"
66
#define VIA_MODULE_NAME "via-ircc"
/* Length of the I/O port range reserved per adapter (request_region()). */
#define CHIP_IO_EXTENT 0x40

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07; /* 1 ms or more */
static int dongle_id = 0;	/* default: probe */

/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);

/* Some prototypes */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
			 unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase);
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void via_remove_one(struct pci_dev *pdev);
/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
/*
 * Crude calibrated delay: each dummy read of legacy I/O port 0x80 costs
 * roughly one microsecond of ISA bus time on a PC.  Only the bus cycle
 * matters, so the read result is explicitly discarded (the original code
 * stored it in an unused local variable).
 */
static void iodelay(int udelay)
{
	int i;

	for (i = 0; i < udelay; i++)
		(void) inb(0x80);	/* value intentionally ignored */
}
118
119 static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
120 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
121 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
122 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
123 { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
124 { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
125 { 0, }
126 };
127
128 MODULE_DEVICE_TABLE(pci,via_pci_tbl);
129
130
131 static struct pci_driver via_driver = {
132 .name = VIA_MODULE_NAME,
133 .id_table = via_pci_tbl,
134 .probe = via_init_one,
135 .remove = via_remove_one,
136 };
137
138
139 /*
140 * Function via_ircc_init ()
141 *
142 * Initialize chip. Just find out chip type and resource.
143 */
144 static int __init via_ircc_init(void)
145 {
146 int rc;
147
148 IRDA_DEBUG(3, "%s()\n", __func__);
149
150 rc = pci_register_driver(&via_driver);
151 if (rc < 0) {
152 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
153 __func__, rc);
154 return -ENODEV;
155 }
156 return 0;
157 }
158
/*
 * Probe one VIA IrDA PCI function.  Determines the chipset family by
 * reading LPC config register 0x20 (0x3C means a VT1211 super-I/O is
 * present, handled as "3076"; anything else is handled as "3096"),
 * reads the BIOS-assigned FIR I/O base, IRQ and DMA channel(s) from the
 * LPC or PCI config space, enables the needed config bits and hands
 * off to via_ircc_open().
 *
 * Returns 0 on success, -ENODEV if the device can't be enabled or the
 * BIOS left the IR function disabled, or via_ircc_open()'s error.
 */
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	int rc;
	u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
	chipio_t info;

	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);

	rc = pci_enable_device (pcidev);
	if (rc) {
		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
		return -ENODEV;
	}

	// South Bridge exist
	if ( ReadLPCReg(0x20) != 0x3C )
		Chipset=0x3096;
	else
		Chipset=0x3076;

	if (Chipset==0x3076) {
		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);

		/* Select super-I/O logical device 0x0c before touching it. */
		WriteLPCReg(7,0x0c );
		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
		if((temp&0x01)==1) {	// BIOS close or no FIR
			WriteLPCReg(0x1d, 0x82 );
			WriteLPCReg(0x23,0x18);
			temp=ReadLPCReg(0xF0);
			if((temp&0x01)==0) {
				/* Two separate DMA channels in LPC reg 0x74;
				 * values are offset by 4 (channels 4..7). */
				temp=(ReadLPCReg(0x74)&0x03);    //DMA
				FirDRQ0=temp + 4;
				temp=(ReadLPCReg(0x74)&0x0C) >> 2;
				FirDRQ1=temp + 4;
			} else {
				/* Single shared DMA channel. */
				temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
				FirDRQ0=temp + 4;
				FirDRQ1=FirDRQ0;
			}
			FirIRQ=(ReadLPCReg(0x70)&0x0f);		//IRQ
			FirIOBase=ReadLPCReg(0x60 ) << 8;	//IO Space :high byte
			FirIOBase=FirIOBase| ReadLPCReg(0x61) ;	//low byte
			FirIOBase=FirIOBase  ;	/* NOTE(review): no-op self-assignment */
			info.fir_base=FirIOBase;
			info.irq=FirIRQ;
			info.dma=FirDRQ1;
			info.dma2=FirDRQ0;
			pci_read_config_byte(pcidev,0x40,&bTmp);
			pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
			pci_read_config_byte(pcidev,0x42,&bTmp);
			pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
			pci_write_config_byte(pcidev,0x5a,0xc0);
			WriteLPCReg(0x28, 0x70 );
			rc = via_ircc_open(pcidev, &info, 0x3076);
		} else
			rc = -ENODEV; //IR not turn on
	} else { //Not VT1211
		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);

		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
		if((bTmp&0x01)==1) {	// BIOS enable FIR
			//Enable Double DMA clock
			/* NOTE(review): oldPCI_40 is first loaded from config
			 * reg 0x42, then immediately reused for reg 0x40, so
			 * the original 0x42 value is not preserved; oldPCI_44
			 * is saved but never restored - confirm intent. */
			pci_read_config_byte(pcidev,0x42,&oldPCI_40);
			pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
			pci_read_config_byte(pcidev,0x40,&oldPCI_40);
			pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
			pci_read_config_byte(pcidev,0x44,&oldPCI_44);
			pci_write_config_byte(pcidev,0x44,0x4e);
  //---------- read configuration from Function0 of south bridge
			if((bTmp&0x02)==0) {
				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
				FirDRQ0 = (bTmp1 & 0x30) >> 4;
				pci_read_config_byte(pcidev,0x44,&bTmp1);
				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
			} else  {
				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
				FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
				FirDRQ1=0;
			}
			pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
			FirIRQ = bTmp1 & 0x0f;

			pci_read_config_byte(pcidev,0x69,&bTmp);
			FirIOBase = bTmp << 8;//hight byte
			pci_read_config_byte(pcidev,0x68,&bTmp);
			FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
  //-------------------------
			info.fir_base=FirIOBase;
			info.irq=FirIRQ;
			info.dma=FirDRQ1;
			info.dma2=FirDRQ0;
			rc = via_ircc_open(pcidev, &info, 0x3096);
		} else
			rc = -ENODEV; //IR not turn on !!!!!
	}//Not VT1211

	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
	return rc;
}
259
260 static void __exit via_ircc_cleanup(void)
261 {
262 IRDA_DEBUG(3, "%s()\n", __func__);
263
264 /* Cleanup all instances of the driver */
265 pci_unregister_driver (&via_driver);
266 }
267
/* netdev ops installed while running at SIR rates (<= 115200 baud);
 * via_ircc_change_speed() swaps between this table and the FIR one,
 * changing only the start_xmit handler. */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
/* netdev ops installed for MIR/FIR rates (> 115200 baud). */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
280
281 /*
282 * Function via_ircc_open(pdev, iobase, irq)
283 *
284 * Open driver instance
285 *
286 */
/*
 * Function via_ircc_open(pdev, iobase, irq)
 *
 *    Open driver instance: allocate the IrDA net_device and private
 *    state, reserve the FIR I/O region, derive QoS from the dongle id,
 *    allocate coherent DMA rx/tx buffers, register the netdev and put
 *    the hardware into its initial 9600-baud SIR state.
 *
 *    @pdev: PCI device being probed
 *    @info: BIOS-assigned resources collected by via_init_one()
 *    @id:   chipset id (0x3076 or 0x3096)
 *
 *    Returns 0 on success or a negative errno; on failure everything
 *    acquired so far is unwound via the err_out* labels (reverse order
 *    of acquisition).
 */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not */
	/* NOTE(review): writing back to the module-wide dongle_id means a
	 * second adapter inherits the first one's value - confirm the
	 * single-adapter assumption. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only value we must override it the baudrate */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch( self->io.dongle_id ){
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *   self->qos.baud_rate.bits = IR_9600;
	 *
	 * Is is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->tx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &via_ircc_sir_ops;	/* start in SIR mode */

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the hardware..
	*/
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return err;
}
415
416 /*
417 * Function via_remove_one(pdev)
418 *
419 * Close driver instance
420 *
421 */
/*
 * Function via_remove_one(pdev)
 *
 *    Close driver instance: reset the chip, unregister the netdev, then
 *    release the I/O region, the DMA buffers and the netdev itself, and
 *    finally disable the PCI device.  Mirrors via_ircc_open() in reverse.
 */
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	//hardware reset.
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	pci_set_drvdata(pdev, NULL);

	free_netdev(self->netdev);

	pci_disable_device(pdev);
}
451
452 /*
453 * Function via_hw_init(self)
454 *
455 * Returns non-negative on success.
456 *
457 * Formerly via_ircc_setup
458 */
/*
 * Function via_hw_init(self)
 *
 *    Program the chip into a known initial state: interrupts/FIFOs
 *    configured, all engines reset and stopped, SIR mode at 9600 baud,
 *    and the attached dongle switched to that speed.
 *
 *    Formerly via_ircc_setup
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	// FIFO Init
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	/* LPC reg 0x20 == 0x3c identifies the VT1211 super-I/O (see probe). */
	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* disable IR while configuring */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* re-enable IR */
}
509
510 /*
511 * Function via_ircc_read_dongle_id (void)
512 *
513 */
/*
 * Function via_ircc_read_dongle_id (void)
 *
 *    Hardware probing of the dongle type is not implemented.  Warn the
 *    user to pass the dongle_id module parameter and fall back to id 9
 *    (the IBM dongle).  The iobase argument is currently unused.
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");

	return 9;	/* default to IBM */
}
521
522 /*
523 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
524 * Change speed of the attach dongle
525 * only implement two type of dongle currently.
526 */
/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change speed of the attach dongle
 *    only implement two type of dongle currently.
 *
 *    Each case drives the dongle's mode-select line (via
 *    SlowIRRXLowActive and friends) with the pulse/timing sequence that
 *    particular part requires; the current chip mode is taken from
 *    IsSIROn()/IsMIROn()/IsFIROn() rather than from @speed.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	/* speed is unused, as we use IsSIROn()/IsMIROn() */
	speed = speed;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

		/* Note: The dongle_id's listed here are derived from
		 * nsc-ircc.c */

	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	//sir
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {	//mir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {	// fir
				if (IsFIROn(iobase)) {	//fir
					// Mode select On
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);	// invert RX pin

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);
		}
		/* NOTE(review): unlike case 0x08, the SIR branch above is not
		 * chained with the MIR/FIR handling below - confirm this
		 * fall-through into the MIR check is intentional. */
		if (IsMIROn(iobase)) {	//mir
			// Mode select On
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
		} else {	// fir
			if (IsFIROn(iobase)) {	//fir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				// TX On
				WriteTX(iobase, ON);
				udelay(20);
				// Mode select OFF
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				// TX Off
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//sir to rx2
		} else {	// fir mir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//fir to rx
		}
		break;

	case 0x11:		/* Temic TFDS4500 */

		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);	// invert RX pin

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if( IsSIROn(iobase) ){	//sir

			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);

		} else{
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff:		/* Vishay */
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;	//VFIR-16
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}
664
665 /*
666 * Function via_ircc_change_speed (self, baud)
667 *
668 * Change the speed of the device
669 *
670 */
/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device: select the controller mode
 *    (SIR/MIR/FIR/VFIR), program the baud-rate divisor, inform the
 *    dongle, and swap the netdev_ops so the matching xmit handler is
 *    used.  Finishes by waking the transmit queue.
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	WriteReg(iobase, I_ST_CT_0, 0x0);	/* disable IR while reprogramming */

	/* Controller mode sellection */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* SIR divisor: 115200 is the base rate. */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7] */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);	/* keep low two bits */
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	//	EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
763
764 /*
765 * Function via_ircc_hard_xmit (skb, dev)
766 *
767 * Transmit the frame!
768 *
769 */
/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit one frame in SIR mode: async-wrap the skb into the DMA tx
 *    buffer and kick a DMA transfer at the *current* speed.  A pending
 *    speed change (irda_get_next_speed()) is either applied immediately
 *    for an empty skb or deferred to tx-complete via self->new_speed.
 *    Always consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			/* Speed-change-only request: no payload to send. */
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;	/* apply after this frame */
	}
	/* Re-program the chip for SIR before every frame. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
841
/*
 * FIR-mode transmit handler: copy the skb payload into the next tx_fifo
 * slot inside the coherent DMA buffer and start the DMA transfer.
 * Speed-change handling mirrors the SIR path.  Always consumes the skb
 * and returns NETDEV_TX_OK.
 */
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	/* If the receive status fifo is still draining, drop this frame. */
	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	/* Turn-around delay; iodelay() is needed on the 3076 variant. */
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	spin_lock_irqsave(&self->lock, flags);
	/* Queue the frame at the current tail of the DMA tx buffer.
	 * NOTE(review): no bounds check against tx_buff.truesize here -
	 * presumably skb->len is limited by the IrDA MTU; confirm. */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
	via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;

}
890
/*
 * Start a DMA transmit of the tx_fifo entry at tx_fifo.ptr: reset the
 * engines, enable TX-side DMA and interrupts, program the ISA DMA
 * controller with the entry's offset within the coherent tx buffer, and
 * start transmission.  Caller holds self->lock.  Always returns 0.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* Translate the entry's virtual start address into a bus address
	 * by offsetting from the buffer's DMA handle. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
921
922 /*
923 * Function via_ircc_dma_xmit_complete (self)
924 *
925 * The transfer of a frame in finished. This function will only be called
926 * by the interrupt handler
927 *
928 */
/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame in finished. This function will only be called
 *    by the interrupt handler
 *
 *    Handles underrun (hwreset + error accounting), applies a deferred
 *    speed change, resets the tx_fifo bookkeeping and wakes the queue.
 *    Returns TRUE (back-to-back transmission is compiled out - see the
 *    F01 comments).
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//      DisableDmaChannel(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* Underrun: count the error and reset the whole chip. */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
		/* how to clear underrun? */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
		// Not finished yet!
		via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else {
F01_E*/
	// Reset Tx FIFO info
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore
	// Tell the network layer, that we can accept more frames
	netif_wake_queue(self->netdev);
//F01   }
	return ret;
}
991
992 /*
993 * Function via_ircc_dma_receive (self)
994 *
995 * Set configuration for receive a frame.
996 *
997 */
/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set configuration for receive a frame.
 *
 *    Resets the tx/rx/status fifo bookkeeping, enables RX-side DMA and
 *    interrupts, and arms a DMA transfer covering the whole rx buffer.
 *    Always returns 0.
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		  self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1034
1035 /*
1036 * Function via_ircc_dma_receive_complete (self)
1037 *
1038 * Controller Finished with receiving frames,
1039 * and this routine is call by ISR
1040 *
1041 */
/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    Controller Finished with receiving frames,
 *    and this routine is call by ISR
 *
 *    SIR/MIR path (< 4 Mbps): copy the received bytes out of the DMA
 *    buffer into a fresh skb (the 3076 variant stores one byte per
 *    16-bit word) and push it up the stack.
 *    FIR path: validate the length, record the frame in the status
 *    fifo, stop the receiver, then dequeue one entry and deliver it.
 *    Returns TRUE on delivery (or benign no-data), FALSE on allocation
 *    failure or a length error (after hwreset).
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	iobase = self->io.fir_base;	/* NOTE(review): shadows the iobase argument */
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	//Speed below FIR
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		// Make sure IP header gets aligned
		skb_reserve(skb, 1);
		/* NOTE(review): len is assumed >= 2 (payload + 2 trailing
		 * bytes); there is no guard against smaller values. */
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			/* 3076 stores one payload byte per 16-bit word. */
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	}

	else {			//FIR mode
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	//interrupt only, data maybe move by RxT
		/* Sanity check: FIR frames carry a 4-byte trailer; payload
		 * must be 2..2048 bytes. */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		// It maybe have MAX_RX_WINDOW package receive by
		// receive_complete before Timer IRQ
/* F01_S
          if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
		  RXStart(iobase,ON);
	          SetTimer(iobase,4);
	  }
	  else	  {
F01_E */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
//F01_S
		// Put this entry back in fifo
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * if frame size, data ptr, or skb ptr are wrong, then get next
		 * entry.
		 */
		if ((skb == NULL) || (skb->data == NULL) ||
		    (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			kfree_skb(skb);
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

//F01_E
	}			//FIR
	return TRUE;

}
1159
1160 /*
1161 * if frame is received , but no INT ,then use this routine to upload frame.
1162 */
/*
 * A frame has been received but no EOF interrupt fired; pull the frame
 * out of the DMA receive buffer and push it up the stack.
 *
 * @self:   driver state for this adapter
 * @iobase: FIR register base I/O port
 *
 * Returns TRUE if a frame was delivered via netif_rx(), FALSE if the
 * frame was too short or no skb could be allocated (counted as a drop).
 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	/* Hardware reports how many bytes the DMA engine deposited. */
	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	/* Too short to hold any payload beside the 4 trailer bytes. */
	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	/* Make sure the IP header ends up aligned. */
	skb_reserve(skb, 1);
	/*
	 * NOTE(review): copies len - 4 + 1 bytes, i.e. one byte more than
	 * "payload minus trailer" — presumably intentional (trailing status
	 * byte?), but the sibling receive paths copy len - 4. Verify.
	 */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	/* Account for this frame in the status FIFO ring. */
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	// Move to next frame
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	/* Keep receiving while the window has room, else stop RX/DMA. */
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
1208
1209 /*
1210 * Implement back to back receive , use this routine to upload data.
1211 */
1212
/*
 * Timer-interrupt path for back-to-back receive: if the chip is still
 * receiving, re-arm the timer and wait; otherwise drain all frames
 * queued in the status FIFO and hand them to the stack, then pick up
 * any frame that completed while we were uploading.
 *
 * @self:   driver state for this adapter
 * @iobase: FIR register base I/O port
 *
 * Returns FALSE when still receiving (nothing uploaded), TRUE otherwise.
 */
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		// if still receiving ,then return ,don't upload frame
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/*
	 * Drain when: we've already waited once, the DMA buffer is close
	 * to full (less than one max-size frame of headroom), or the
	 * status FIFO window is full.
	 */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	//upload frame
			// Put this entry back in fifo
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			/* NOTE(review): 'status' is read but never used here. */
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			/* Make sure the IP header ends up aligned. */
			skb_reserve(skb, 1);
			/* Strip the 4 trailer bytes reported in len. */
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			// Move to next frame
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}		//while
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * if frame is receive complete at this routine ,then upload
		 * frame.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}			// timer detect complete
	else
		SetTimer(iobase, 4);
	return TRUE;

}
1292
1293
1294
1295 /*
1296 * Function via_ircc_interrupt (irq, dev_id)
1297 *
1298 * An interrupt from the chip has arrived. Time to do some work
1299 *
1300 */
/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 * Reads the host status register and dispatches on the three event
 * bits: 0x40 timer, 0x20 transmit, 0x10 receive. Runs under self->lock
 * to serialize against the ioctl/speed-change paths.
 *
 * Returns IRQ_HANDLED when the chip reported any status (iHostIntType
 * non-zero), IRQ_NONE otherwise.
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			/* Timer while transmitting: kick the TX DMA again. */
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		/* End-of-message: frame fully sent, switch back to RX. */
		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		/* EOF: a complete frame is in the DMA buffer. */
		if (iRxIntType & 0x10) {
			if (via_ircc_dma_receive_complete(self, iobase)) {
				//F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				/* RxData alone (0x08) is not an error. */
				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			/* Re-arm receive after the error recovery. */
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}
1405
/*
 * Hard-reset the controller: stop all DMA/TX/RX activity, reprogram the
 * chip with its SIR-mode defaults (9600 baud, CRC16, SIR filter on),
 * then restore the previously configured link speed.
 *
 * @self: driver state for this adapter
 *
 * Also clears the status FIFO, since any queued frame info refers to
 * pre-reset hardware state. The register write order below mirrors the
 * chip's documented init sequence — do not reorder casually.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* Gate off, program baud/pulse/preamble, then gate back on. */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
1438
1439 /*
1440 * Function via_ircc_is_receiving (self)
1441 *
1442 * Return TRUE is we are currently receiving a frame
1443 *
1444 */
1445 static int via_ircc_is_receiving(struct via_ircc_cb *self)
1446 {
1447 int status = FALSE;
1448 int iobase;
1449
1450 IRDA_ASSERT(self != NULL, return FALSE;);
1451
1452 iobase = self->io.fir_base;
1453 if (CkRxRecv(iobase, self))
1454 status = TRUE;
1455
1456 IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1457
1458 return status;
1459 }
1460
1461
1462 /*
1463 * Function via_ircc_net_open (dev)
1464 *
1465 * Start the device
1466 *
1467 */
1468 static int via_ircc_net_open(struct net_device *dev)
1469 {
1470 struct via_ircc_cb *self;
1471 int iobase;
1472 char hwname[32];
1473
1474 IRDA_DEBUG(3, "%s()\n", __func__);
1475
1476 IRDA_ASSERT(dev != NULL, return -1;);
1477 self = netdev_priv(dev);
1478 dev->stats.rx_packets = 0;
1479 IRDA_ASSERT(self != NULL, return 0;);
1480 iobase = self->io.fir_base;
1481 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1482 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1483 self->io.irq);
1484 return -EAGAIN;
1485 }
1486 /*
1487 * Always allocate the DMA channel after the IRQ, and clean up on
1488 * failure.
1489 */
1490 if (request_dma(self->io.dma, dev->name)) {
1491 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1492 self->io.dma);
1493 free_irq(self->io.irq, dev);
1494 return -EAGAIN;
1495 }
1496 if (self->io.dma2 != self->io.dma) {
1497 if (request_dma(self->io.dma2, dev->name)) {
1498 IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1499 driver_name, self->io.dma2);
1500 free_irq(self->io.irq, dev);
1501 free_dma(self->io.dma);
1502 return -EAGAIN;
1503 }
1504 }
1505
1506
1507 /* turn on interrupts */
1508 EnAllInt(iobase, ON);
1509 EnInternalLoop(iobase, OFF);
1510 EnExternalLoop(iobase, OFF);
1511
1512 /* */
1513 via_ircc_dma_receive(self);
1514
1515 /* Ready to play! */
1516 netif_start_queue(dev);
1517
1518 /*
1519 * Open new IrLAP layer instance, now that everything should be
1520 * initialized properly
1521 */
1522 sprintf(hwname, "VIA @ 0x%x", iobase);
1523 self->irlap = irlap_open(dev, &self->qos, hwname);
1524
1525 self->RxLastCount = 0;
1526
1527 return 0;
1528 }
1529
1530 /*
1531 * Function via_ircc_net_close (dev)
1532 *
1533 * Stop the device
1534 *
1535 */
/*
 * Function via_ircc_net_close (dev)
 *
 *    Stop the device
 *
 * Reverses via_ircc_net_open(): stops the queue, closes the IrLAP
 * instance, halts DMA, disables chip interrupts, then releases the
 * IRQ and DMA channel(s).
 *
 * Always returns 0 (callers ignore failures of ndo_stop).
 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	/* Quiesce the hardware before releasing the resources. */
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	/* The second channel was only claimed if it differs from the first. */
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
1567
1568 /*
1569 * Function via_ircc_net_ioctl (dev, rq, cmd)
1570 *
1571 * Process IOCTL commands for this device
1572 *
1573 */
1574 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1575 int cmd)
1576 {
1577 struct if_irda_req *irq = (struct if_irda_req *) rq;
1578 struct via_ircc_cb *self;
1579 unsigned long flags;
1580 int ret = 0;
1581
1582 IRDA_ASSERT(dev != NULL, return -1;);
1583 self = netdev_priv(dev);
1584 IRDA_ASSERT(self != NULL, return -1;);
1585 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1586 cmd);
1587 /* Disable interrupts & save flags */
1588 spin_lock_irqsave(&self->lock, flags);
1589 switch (cmd) {
1590 case SIOCSBANDWIDTH: /* Set bandwidth */
1591 if (!capable(CAP_NET_ADMIN)) {
1592 ret = -EPERM;
1593 goto out;
1594 }
1595 via_ircc_change_speed(self, irq->ifr_baudrate);
1596 break;
1597 case SIOCSMEDIABUSY: /* Set media busy */
1598 if (!capable(CAP_NET_ADMIN)) {
1599 ret = -EPERM;
1600 goto out;
1601 }
1602 irda_device_set_media_busy(self->netdev, TRUE);
1603 break;
1604 case SIOCGRECEIVING: /* Check if we are receiving right now */
1605 irq->ifr_receiving = via_ircc_is_receiving(self);
1606 break;
1607 default:
1608 ret = -EOPNOTSUPP;
1609 }
1610 out:
1611 spin_unlock_irqrestore(&self->lock, flags);
1612 return ret;
1613 }
1614
/* Module metadata. */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

/* Module entry/exit points; the functions are defined earlier in this file. */
module_init(via_ircc_init);
module_exit(via_ircc_cleanup);