v2.4.8 -> v2.4.8.1
[opensuse:kernel.git] / drivers / net / irda / vlsi_ir.c
1 /*********************************************************************
2  *
3  *      vlsi_ir.c:      VLSI82C147 PCI IrDA controller driver for Linux
4  *
5  *      Version:        0.1, Aug 6, 2001
6  *
7  *      Copyright (c) 2001 Martin Diehl
8  *
9  *      This program is free software; you can redistribute it and/or 
10  *      modify it under the terms of the GNU General Public License as 
11  *      published by the Free Software Foundation; either version 2 of 
12  *      the License, or (at your option) any later version.
13  *
14  *      This program is distributed in the hope that it will be useful,
15  *      but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  *      GNU General Public License for more details.
18  *
19  *      You should have received a copy of the GNU General Public License 
20  *      along with this program; if not, write to the Free Software 
21  *      Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
22  *      MA 02111-1307 USA
23  *
24  ********************************************************************/
25
26 #include <linux/module.h>
27  
28 #include <linux/kernel.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/slab.h>
32 #include <linux/netdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/delay.h>
35
36 #include <net/irda/irda.h>
37 #include <net/irda/irda_device.h>
38 #include <net/irda/wrapper.h>
39 #include <net/irda/irlap.h>
40
41 #include <net/irda/vlsi_ir.h>
42
43
44 /********************************************************/
45
46
MODULE_DESCRIPTION("IrDA SIR/MIR/FIR driver for VLSI 82C147");
MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");


/* driver name - used when requesting the I/O regions and the IRQ */

static /* const */ char drivername[] = "vlsi_ir";


/* PCI base/sub class code for generic IrDA controllers (0x0d/0x00) */

#define PCI_CLASS_IRDA_GENERIC 0x0d00

/* PCI match table: single supported device, the VLSI 82C147.
 * class is shifted by 8 because the register also carries the prog-if byte.
 * Terminated by the all-zero sentinel entry as the PCI core requires.
 */

static struct pci_device_id vlsi_irda_table [] __devinitdata = { {

	class:          PCI_CLASS_IRDA_GENERIC << 8,
	vendor:         PCI_VENDOR_ID_VLSI,
	device:         PCI_DEVICE_ID_VLSI_82C147,
	}, { /* all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
65
66
67 /********************************************************/
68
69
MODULE_PARM(clksrc, "i");
MODULE_PARM_DESC(clksrc, "clock input source selection");

/*	clksrc: which clock source to be used
 *		0: auto - try PLL, fallback to 40MHz XCLK
 *		1: on-chip 48MHz PLL
 *		2: external 48MHz XCLK
 *		3: external 40MHz XCLK (HP OB-800)
 *	note: vlsi_set_clock() may overwrite 0(auto) with the detected source
 */

static int clksrc = 0;			/* default is 0(auto) */


MODULE_PARM(ringsize, "1-2i");		/* "1-2i": one or two int values */
MODULE_PARM_DESC(ringsize, "tx, rx ring descriptor size");

/*	ringsize: size of the tx and rx descriptor rings
 *		independent for tx and rx
 *		specify as ringsize=tx[,rx]
 *		allowed values: 4, 8, 16, 32, 64
 *		(must stay a power of two - the ring masks depend on it)
 */

static int ringsize[] = {16,16};	/* default is tx=rx=16 */


MODULE_PARM(sirpulse, "i");
MODULE_PARM_DESC(sirpulse, "sir pulse width tuning");

/*	sirpulse: tuning of the sir pulse width within IrPHY 1.3 limits
 *		0: real short, 1.5us (exception 6us at 2.4kb/s)
 *		1: nominal 3/16 bittime width
 */

static int sirpulse = 1;		/* default is 3/16 bittime */


MODULE_PARM(mtt_bits, "i");
MODULE_PARM_DESC(mtt_bits, "IrLAP bitfield representing min-turn-time");

/*	mtt_bit: encoded min-turn-time values we accept for connections
 *		 according to IrLAP definition (section 6.6.8)
 *		 the widespreadly used HP HDLS-1100 requires 1 msec
 */

static int mtt_bits = 0x07;		/* default is 1 ms or more */
115
116
117 /********************************************************/
118
119
120 /* some helpers for operations on ring descriptors */
121
122
123 static inline int rd_is_active(struct ring_descr *rd)
124 {
125         return ((rd->rd_status & RD_STAT_ACTIVE) != 0);
126 }
127
static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
{
	/* overlayed - order is important!
	 * NOTE(review): rd_addr and rd_status share storage in the descriptor
	 * word, so the address must be stored before the status byte - do not
	 * reorder these two assignments.
	 */

	rd->rd_addr = a;
	rd->rd_status = s;
}
135
/* write the descriptor status byte (e.g. hand ownership to the hardware) */
static inline void rd_set_status(struct ring_descr *rd, u8 s)
{
	rd->rd_status = s;
}
140
/* write the descriptor byte count field */
static inline void rd_set_count(struct ring_descr *rd, u16 c)
{
	rd->rd_count = c;
}
145
/* read back the descriptor status byte */
static inline u8 rd_get_status(struct ring_descr *rd)
{
	return rd->rd_status;
}
150
151 static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
152 {
153         dma_addr_t      a;
154
155         a = (rd->rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
156         return a;
157 }
158
/* read back the descriptor byte count field */
static inline u16 rd_get_count(struct ring_descr *rd)
{
	return rd->rd_count;
}
163
164
165 /* advancing indices pointing into descriptor rings */
166
/* advance a ring index by one position, wrapping it with the ring's mask
 * (masks are built from power-of-two ring sizes, so this is a cheap modulo)
 */
static inline void ring_ptr_inc(unsigned *ptr, unsigned mask)
{
	unsigned next = *ptr + 1;

	*ptr = next & mask;
}
171
172
173 /********************************************************/
174
175
#define MAX_PACKET_LEN		2048	/* IrDA MTU */

/* increase transfer buffer size somewhat so we have enough space left
 * when packet size increases during wrapping due to XBOFs and escapes.
 * well, this wastes some memory - anyway, later we will
 * either map skb's directly or use pci_pool allocator...
 */

#define XFER_BUF_SIZE		(MAX_PACKET_LEN+512)

/* the memory required to hold the 2 descriptor rings
 * (tx and rx each get MAX_RING_DESCR hardware descriptors)
 */

#define RING_AREA_SIZE		(2 * MAX_RING_DESCR * sizeof(struct ring_descr))

/* the memory required to hold the rings' buffer entries
 * (driver-side bookkeeping: buffer head pointer + skb per slot)
 */

#define RING_ENTRY_SIZE		(2 * MAX_RING_DESCR * sizeof(struct ring_entry))
193
194
195 /********************************************************/
196
197 /* just dump all registers */
198
199 static void vlsi_reg_debug(int iobase, const char *s)
200 {
201         int     i;
202
203         mb();
204         printk(KERN_DEBUG "%s: ", s);
205         for (i = 0; i < 0x20; i++)
206                 printk("%02x", (unsigned)inb((iobase+i)));
207         printk("\n");
208 }
209
210 /********************************************************/
211
212
/* Select and program the chip's clock source according to the clksrc
 * module parameter.  For clksrc 0 (auto) or 1 the on-chip PLL is tried
 * first; lock detection polls CLKCTL for up to 10 msec and requires three
 * consecutive lock readings.  On auto mode a failed PLL falls back to the
 * 40MHz external clock (clksrc is rewritten accordingly); explicit PLL
 * request fails hard.  The clock is left in stopped state (CLKCTL_CLKSTP)
 * until vlsi_start_clock() releases it.
 * Returns 0 on success, -1 on invalid clksrc or PLL lock failure.
 */
static int vlsi_set_clock(struct pci_dev *pdev)
{
	u8	clkctl, lock;
	int	i, count;

	if (clksrc < 0  ||  clksrc > 3) {
		printk(KERN_ERR "%s: invalid clksrc=%d\n", __FUNCTION__, clksrc);
		return -1;
	}
	if (clksrc < 2) { /* auto or PLL: try PLL */
		clkctl = CLKCTL_NO_PD | CLKCTL_CLKSTP;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* protocol to detect PLL lock synchronisation */
		udelay(500);
		count = 0;
		for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
			if (lock&CLKCTL_LOCK) {
				if (++count >= 3)	/* require 3 consecutive lock readings */
					break;
			}
			udelay(50);
		}
		if (count < 3) {
			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
				printk(KERN_ERR "%s: no PLL or failed to lock!\n",
					__FUNCTION__);
				clkctl = CLKCTL_CLKSTP;
				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
				return -1;
			}
			else /* was: clksrc=0(auto) */
				clksrc = 3; /* fallback to 40MHz XCLK (OB800) */

			printk(KERN_INFO "%s: PLL not locked, fallback to clksrc=%d\n",
				__FUNCTION__, clksrc);
		}
		else { /* got successful PLL lock */
			clksrc = 1;
			return 0;
		}
	}

	/* we get here if either no PLL detected in auto-mode or
	   the external clock source explicitly specified */

	clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
	if (clksrc == 3)	/* 40MHz instead of 48MHz external clock */
		clkctl |= CLKCTL_XCKSEL;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	/* no way to test for working XCLK */

	return 0;
}
269
270
271 static void vlsi_start_clock(struct pci_dev *pdev)
272 {
273         u8      clkctl;
274
275         printk(KERN_INFO "%s: start clock using %s as input\n", __FUNCTION__,
276                 (clksrc&2)?((clksrc&1)?"40MHz XCLK":"48MHz XCLK"):"48MHz PLL");
277         pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
278         clkctl &= ~CLKCTL_CLKSTP;
279         pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
280 }
281                         
282
283 static void vlsi_stop_clock(struct pci_dev *pdev)
284 {
285         u8      clkctl;
286
287         pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
288         clkctl |= CLKCTL_CLKSTP;
289         pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
290 }
291                         
292
/* Undo vlsi_set_clock(): stop the clock if still running, then power down
 * the PLL and deselect the external clock input.
 */
static void vlsi_unset_clock(struct pci_dev *pdev)
{
	u8	clkctl;

	pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
	if (!(clkctl&CLKCTL_CLKSTP))
		/* make sure clock is already stopped */
		vlsi_stop_clock(pdev);

	/* NOTE(review): the write below uses the clkctl value read above,
	 * which lacks CLKCTL_CLKSTP when the clock was still running -
	 * presumably harmless once EXTCLK and NO_PD are cleared, but
	 * confirm against the 82C147 datasheet.
	 */
	clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_NO_PD);
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}
305
306 /********************************************************/
307
308
309 /* ### FIXME: don't use old virt_to_bus() anymore! */
310
311 static int vlsi_alloc_buffers_init(vlsi_irda_dev_t *idev)
312 {
313         void *buf;
314         int i, j;
315
316         idev->ring_buf = kmalloc(RING_ENTRY_SIZE,GFP_KERNEL);
317         if (!idev->ring_buf)
318                 return -ENOMEM;
319         memset(idev->ring_buf, 0, RING_ENTRY_SIZE);
320
321         for (i = MAX_RING_DESCR; i < MAX_RING_DESCR+ringsize[0]; i++) {
322                 buf = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
323                 if (!buf) {
324                         for (j = MAX_RING_DESCR; j < i; j++)
325                                 kfree(idev->ring_buf[j].head);
326                         kfree(idev->ring_buf);
327                         idev->ring_buf = NULL;
328                         return -ENOMEM;
329                 }
330                 idev->ring_buf[i].head = buf;
331                 idev->ring_buf[i].skb = NULL;
332                 rd_set_addr_status(idev->ring_hw+i,virt_to_bus(buf), 0);
333         }
334
335         for (i = 0; i < ringsize[1]; i++) {
336                 buf = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
337                 if (!buf) {
338                         for (j = 0; j < i; j++)
339                                 kfree(idev->ring_buf[j].head);
340                         for (j = MAX_RING_DESCR; j < MAX_RING_DESCR+ringsize[0]; j++)
341                                 kfree(idev->ring_buf[j].head);
342                         kfree(idev->ring_buf);
343                         idev->ring_buf = NULL;
344                         return -ENOMEM;
345                 }
346                 idev->ring_buf[i].head = buf;
347                 idev->ring_buf[i].skb = NULL;
348                 rd_set_addr_status(idev->ring_hw+i,virt_to_bus(buf), RD_STAT_ACTIVE);
349         }
350
351         return 0;
352 }
353
354
355 static int vlsi_init_ring(vlsi_irda_dev_t *idev)
356 {
357
358         idev->tx_mask = MAX_RING_DESCR | (ringsize[0] - 1);
359         idev->rx_mask = ringsize[1] - 1;
360
361         idev->ring_hw = pci_alloc_consistent(idev->pdev,
362                 RING_AREA_SIZE, &idev->busaddr);
363         if (!idev->ring_hw) {
364                 printk(KERN_ERR "%s: insufficient memory for descriptor rings\n",
365                         __FUNCTION__);
366                 return -ENOMEM;
367         }
368 #if 0
369         printk(KERN_DEBUG "%s: (%d,%d)-ring %p / %p\n", __FUNCTION__,
370                 ringsize[0], ringsize[1], idev->ring_hw, 
371                 (void *)(unsigned)idev->busaddr);
372 #endif
373         memset(idev->ring_hw, 0, RING_AREA_SIZE);
374
375         if (vlsi_alloc_buffers_init(idev)) {
376                 
377                 pci_free_consistent(idev->pdev, RING_AREA_SIZE,
378                         idev->ring_hw, idev->busaddr);
379                 printk(KERN_ERR "%s: insufficient memory for ring buffers\n",
380                         __FUNCTION__);
381                 return -1;
382         }
383
384         return 0;
385 }
386
387
388
389 /********************************************************/
390
391
392
/* Reprogram the IrPHY for the pending speed change (idev->new_baud).
 * Selects FIR (4Mb/s), MIR (1.152Mb/s) or SIR (2.4k-115.2kb/s) mode,
 * writes the matching IRCFG/NPHYCTL values and verifies via readback
 * that the chip accepted them.
 * Returns 0 on success, -1 if the readback check fails (a register dump
 * is printed in that case).  Called under no lock; takes idev->lock.
 */
static int vlsi_set_baud(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	unsigned long flags;
	u16 nphyctl;
	int iobase;
	u16 config;
	unsigned mode;
	int	ret;
	int	baudrate;

	baudrate = idev->new_baud;
	iobase = ndev->base_addr;

	printk(KERN_DEBUG "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);

	spin_lock_irqsave(&idev->lock, flags);

	outw(0, iobase+VLSI_PIO_IRENABLE);	/* disable IR unit while reconfiguring */

	if (baudrate == 4000000) {		/* FIR */
		mode = IFF_FIR;
		config = IRCFG_FIR;
		nphyctl = PHYCTL_FIR;
	}
	else if (baudrate == 1152000) {		/* MIR - uses 16bit CRC */
		mode = IFF_MIR;
		config = IRCFG_MIR | IRCFG_CRC16;
		nphyctl = PHYCTL_MIR(baudrate);
	}
	else {					/* SIR for all remaining rates */
		mode = IFF_SIR;
		config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
		switch(baudrate) {
			default:
				printk(KERN_ERR "%s: undefined baudrate %d - fallback to 9600!\n",
					__FUNCTION__, baudrate);
				baudrate = 9600;
				/* fallthru */
			case 2400:
			case 9600:
			case 19200:
			case 38400:
			case 57600:
			case 115200:
				nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
				break;
		}
	}

	config |= IRCFG_MSTR | IRCFG_ENRX;

	outw(config, iobase+VLSI_PIO_IRCFG);

	outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
	wmb();
	outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
		/* chip fetches IRCFG on next rising edge of its 8MHz clock */

	mb();
	config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;

	/* XOR out the mode status bit(s) we expect for the chosen mode;
	 * if the chip applied the configuration, the remainder must be
	 * exactly IREN plus ENRXST (checked below)
	 */
	if (mode == IFF_FIR)
		config ^= IRENABLE_FIR_ON;
	else if (mode == IFF_MIR)
		config ^= (IRENABLE_FIR_ON|IRENABLE_CRC16_ON);
	else
		config ^= IRENABLE_SIR_ON;


	if (config != (IRENABLE_IREN|IRENABLE_ENRXST)) {
		printk(KERN_ERR "%s: failed to set %s mode!\n", __FUNCTION__,
			(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
		ret = -1;
	}
	else {
		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
			printk(KERN_ERR "%s: failed to apply baudrate %d\n",
				__FUNCTION__, baudrate);
			ret = -1;
		}
		else {
			idev->mode = mode;
			idev->baud = baudrate;
			idev->new_baud = 0;	/* speed change completed */
			ret = 0;
		}
	}
	spin_unlock_irqrestore(&idev->lock, flags);

	if (ret)
		vlsi_reg_debug(iobase,__FUNCTION__);

	return ret;
}
488
489
490
/* Bring the chip into a defined operational state: clear pending
 * interrupts, reset the ring pointer (by dropping IRCFG_MSTR), program
 * ring base/size registers, sync the driver's ring indices with the
 * hardware's, start at 9600 baud SIR and finally enable the rx/tx packet
 * interrupts.  The wmb()/mb() barriers keep the write ordering the chip
 * expects.  Always returns 0.
 */
static int vlsi_init_chip(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	u16 ptr;
	unsigned  iobase;


	iobase = ndev->base_addr;

	outw(0, iobase+VLSI_PIO_IRENABLE);

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */

	/* disable everything, particularly IRCFG_MSTR - which resets the RING_PTR */

	outw(0, iobase+VLSI_PIO_IRCFG);
	wmb();
	outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);

	mb();

	outw(0, iobase+VLSI_PIO_IRENABLE);

	outw(MAX_PACKET_LEN, iobase+VLSI_PIO_MAXPKT);

	outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);

	outw(TX_RX_TO_RINGSIZE(ringsize[0], ringsize[1]), iobase+VLSI_PIO_RINGSIZE);


	/* read the chip's current ring positions so driver-side get/put
	 * indices start out in sync with the hardware
	 */
	ptr = inw(iobase+VLSI_PIO_RINGPTR);
	idev->rx_put = idev->rx_get = RINGPTR_GET_RX(ptr);
	idev->tx_put = idev->tx_get = RINGPTR_GET_TX(ptr);

	outw(IRCFG_MSTR, iobase+VLSI_PIO_IRCFG);		/* ready for memory access */
	wmb();
	outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);

	mb();

	idev->new_baud = 9600;		/* start with IrPHY using 9600(SIR) mode */
	vlsi_set_baud(ndev);

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* w/c pending IRQ again */
	wmb();

	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
	 * basically every received pulse fires an ACTIVITY-INT
	 * leading to >1000 INT's per second instead of few 10
	 */

	outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
	wmb();

	return 0;
}
547
548
549 /**************************************************************/
550
551
/* Service the rx side after an RPKTINT: walk the rx ring from rx_get,
 * harvest every descriptor the hardware has released (ACTIVE bit clear),
 * push good frames up the stack via netif_rx(), account errors, then
 * recycle each descriptor back to the hardware.  Finally re-asserts ENRX
 * (only when tx is idle, to avoid racing the shared IRENABLE register)
 * and prompts the chip to rescan the ring.
 * Returns 0, i.e. never vetoes a pending speed change.
 */
static int vlsi_rx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	int	iobase;
	int	entry;
	int	len;
	u8	status;
	u16	word;
	struct sk_buff	*skb;
	int	crclen;

	iobase = ndev->base_addr;

	entry = idev->rx_get;	/* first descriptor to harvest */

	while ( !rd_is_active(idev->ring_hw+idev->rx_get) ) {

		ring_ptr_inc(&idev->rx_get, idev->rx_mask);

		/* process all descriptors between entry and the advanced rx_get */
		while (entry != idev->rx_get) {

			status = rd_get_status(idev->ring_hw+entry);

			if (status & RD_STAT_ACTIVE) {
				printk(KERN_CRIT "%s: rx still active!!!\n",
					__FUNCTION__);
				break;
			}
			if (status & RX_STAT_ERROR) {
				idev->stats.rx_errors++;
				if (status & RX_STAT_OVER)  
					idev->stats.rx_over_errors++;
				if (status & RX_STAT_LENGTH)  
					idev->stats.rx_length_errors++;
				if (status & RX_STAT_PHYERR)  
					idev->stats.rx_frame_errors++;
				if (status & RX_STAT_CRCERR)  
					idev->stats.rx_crc_errors++;
			}
			else {
				len = rd_get_count(idev->ring_hw+entry);
				crclen = (idev->mode==IFF_FIR) ? 4 : 2;	/* FIR uses 32bit CRC */
				if (len < crclen)
					printk(KERN_ERR "%s: strange frame (len=%d)\n",
						__FUNCTION__, len);
				else
					len -= crclen;		/* remove trailing CRC */

				skb = dev_alloc_skb(len+1);
				if (skb) {
					skb->dev = ndev;
					skb_reserve(skb,1);	/* IrDA alignment byte */
					memcpy(skb_put(skb,len), idev->ring_buf[entry].head, len);
					idev->stats.rx_packets++;
					idev->stats.rx_bytes += len;
					skb->mac.raw = skb->data;
					skb->protocol = htons(ETH_P_IRDA);
					netif_rx(skb);				
				}
				else {
					idev->stats.rx_dropped++;
					printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
				}
			}
			/* recycle: clear count and hand descriptor back to hardware */
			rd_set_count(idev->ring_hw+entry, 0);
			rd_set_status(idev->ring_hw+entry, RD_STAT_ACTIVE);
			ring_ptr_inc(&entry, idev->rx_mask);
		}
	}
	/* NOTE(review): entry has caught up with rx_get when the loops exit,
	 * so these two stores look redundant - presumably kept for clarity
	 * of the put/get bookkeeping; verify before touching.
	 */
	idev->rx_put = idev->rx_get;
	idev->rx_get = entry;

	word = inw(iobase+VLSI_PIO_IRENABLE);
	if (!(word & IRENABLE_ENTXST)) {

		/* only rewrite ENRX, if tx not running!
		 * rewriting ENRX during tx in progress wouldn't hurt
		 * but would be racy since we would also have to rewrite
		 * ENTX then (same register) - which might get disabled meanwhile.
		 */

		outw(0, iobase+VLSI_PIO_IRENABLE);

		word = inw(iobase+VLSI_PIO_IRCFG);
		mb();
		outw(word | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
		wmb();
		outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
	}
	mb();
	outw(0, iobase+VLSI_PIO_PROMPT);	/* tell chip to rescan the ring */
	return 0;
}
645
646
/* Service the tx side after a TPKTINT: reclaim every descriptor the
 * hardware has completed (ACTIVE bit clear), account packets/bytes or
 * underrun errors, then either keep ENTX asserted (ring not yet empty)
 * or switch back to rx-only mode.  Wakes the netif queue if it was
 * stopped by the xmit path.
 * Returns 1 when tx is still in progress (callers must then skip the
 * pending speed-change check), 0 otherwise.
 */
static int vlsi_tx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	int	iobase;
	int	entry;
	int	ret;
	u16	config;
	u16	status;

	ret = 0;
	iobase = ndev->base_addr;

	entry = idev->tx_get;	/* first descriptor to reclaim */

	while ( !rd_is_active(idev->ring_hw+idev->tx_get) ) {

		if (idev->tx_get == idev->tx_put) { /* tx ring empty */
			/* sth more to do here? */
			break;
		}
		ring_ptr_inc(&idev->tx_get, idev->tx_mask);
		while (entry != idev->tx_get) {
			status = rd_get_status(idev->ring_hw+entry);
			if (status & RD_STAT_ACTIVE) {
				printk(KERN_CRIT "%s: tx still active!!!\n",
					__FUNCTION__);
				break;
			}
			if (status & TX_STAT_UNDRN) {
				idev->stats.tx_errors++;
				idev->stats.tx_fifo_errors++;
			}
			else {
				idev->stats.tx_packets++;
				idev->stats.tx_bytes += rd_get_count(idev->ring_hw+entry);
			}
			/* descriptor is now free for reuse by the xmit path */
			rd_set_count(idev->ring_hw+entry, 0);
			rd_set_status(idev->ring_hw+entry, 0);
			ring_ptr_inc(&entry, idev->tx_mask);
		}
	}

	/* IRENABLE must be cleared before IRCFG may be rewritten */
	outw(0, iobase+VLSI_PIO_IRENABLE);
	config = inw(iobase+VLSI_PIO_IRCFG);
	mb();

	if (idev->tx_get != idev->tx_put) {	/* tx ring not empty */
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
		ret = 1;			/* no speed-change-check */
	}
	else
		outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
	wmb();
	outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);

	mb();

	outw(0, iobase+VLSI_PIO_PROMPT);	/* tell chip to rescan the ring */
	wmb();

	idev->tx_get = entry;
	if (netif_queue_stopped(ndev)) {
		netif_wake_queue(ndev);
		printk(KERN_DEBUG "%s: queue awoken\n", __FUNCTION__);
	}
	return ret;
}
714
715
/* Activity interrupt: currently only logged; returning 0 means it never
 * vetoes a pending speed change.
 */
static int vlsi_act_interrupt(struct net_device *ndev)
{
	printk(KERN_DEBUG "%s\n", __FUNCTION__);
	return 0;
}
721
722
723 static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
724 {
725         struct net_device *ndev = dev_instance;
726         vlsi_irda_dev_t *idev = ndev->priv;
727         int             iobase;
728         u8              irintr;
729         int             boguscount = 20;
730         int             no_speed_check = 0;
731         unsigned        flags;
732
733
734         iobase = ndev->base_addr;
735         spin_lock_irqsave(&idev->lock,flags);
736         do {
737                 irintr = inb(iobase+VLSI_PIO_IRINTR);
738                 rmb();
739                 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
740                 wmb();
741
742                 if (!(irintr&=IRINTR_INT_MASK))         /* not our INT - probably shared */
743                         break;
744
745 //              vlsi_reg_debug(iobase,__FUNCTION__);
746
747                 if (irintr&IRINTR_RPKTINT)
748                         no_speed_check |= vlsi_rx_interrupt(ndev);
749
750                 if (irintr&IRINTR_TPKTINT)
751                         no_speed_check |= vlsi_tx_interrupt(ndev);
752
753                 if ((irintr&IRINTR_ACTIVITY) && !(irintr^IRINTR_ACTIVITY) )
754                         no_speed_check |= vlsi_act_interrupt(ndev);
755
756                 if (irintr & ~(IRINTR_RPKTINT|IRINTR_TPKTINT|IRINTR_ACTIVITY))
757                         printk(KERN_DEBUG "%s: IRINTR = %02x\n",
758                                 __FUNCTION__, (unsigned)irintr);
759                         
760         } while (--boguscount > 0);
761         spin_unlock_irqrestore(&idev->lock,flags);
762
763         if (boguscount <= 0)
764                 printk(KERN_ERR "%s: too much work in interrupt!\n", __FUNCTION__);
765
766         else if (!no_speed_check) {
767                 if (idev->new_baud)
768                         vlsi_set_baud(ndev);
769         }
770 }
771
772
773 /**************************************************************/
774
775 static int vlsi_open(struct net_device *ndev)
776 {
777         vlsi_irda_dev_t *idev = ndev->priv;
778         struct pci_dev *pdev = idev->pdev;
779         int     err;
780
781         MOD_INC_USE_COUNT;              /* still needed? - we have SET_MODULE_OWNER! */
782
783         if (pci_request_regions(pdev,drivername)) {
784                 printk(KERN_ERR "%s: io resource busy\n", __FUNCTION__);
785                 MOD_DEC_USE_COUNT;
786                 return -EAGAIN;
787         }
788
789         if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ|SA_INTERRUPT,
790                         drivername, ndev)) {
791                 printk(KERN_ERR "%s: couldn't get IRQ: %d\n",
792                         __FUNCTION__, ndev->irq);
793                 pci_release_regions(pdev);
794                 MOD_DEC_USE_COUNT;
795                 return -EAGAIN;
796         }
797         printk(KERN_INFO "%s: got resources for %s - irq=%d / io=%04lx\n",
798                 __FUNCTION__, ndev->name, ndev->irq, ndev->base_addr );
799
800         if (vlsi_set_clock(pdev)) {
801                 printk(KERN_ERR "%s: no valid clock source\n",
802                         __FUNCTION__);
803                 free_irq(ndev->irq,ndev);
804                 pci_release_regions(pdev);
805                 MOD_DEC_USE_COUNT;
806                 return -EIO;
807         }
808
809         vlsi_start_clock(pdev);
810
811         err = vlsi_init_ring(idev);
812         if (err) {
813                 vlsi_unset_clock(pdev);
814                 free_irq(ndev->irq,ndev);
815                 pci_release_regions(pdev);
816                 MOD_DEC_USE_COUNT;
817                 return err;
818         }
819
820         vlsi_init_chip(ndev);
821
822         printk(KERN_INFO "%s: IrPHY setup: %d baud (%s), %s SIR-pulses\n",
823                 __FUNCTION__, idev->baud, 
824                 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"),
825                 (sirpulse)?"3/16 bittime":"short");
826
827         idev->irlap = irlap_open(ndev,&idev->qos);
828
829         netif_start_queue(ndev);
830
831         printk(KERN_INFO "%s: device %s operational using (%d,%d) tx,rx-ring\n",
832                 __FUNCTION__, ndev->name, ringsize[0], ringsize[1]);
833
834         return 0;
835 }
836
837
/* net_device stop(): the reverse of vlsi_open() - detach from IrLAP,
 * quiesce the chip (barrier-ordered register writes), stop and release
 * the clock, free the IRQ, release ring buffers and the consistent
 * descriptor area, clear PCI bus mastering and release the I/O regions.
 * Always returns 0.
 */
static int vlsi_close(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct pci_dev *pdev = idev->pdev;
	int	i;
	u8	cmd;
	unsigned iobase;


	iobase = ndev->base_addr;
	netif_stop_queue(ndev);

	if (idev->irlap)
		irlap_close(idev->irlap);
	idev->irlap = NULL;

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* w/c pending + disable further IRQ */
	wmb();
	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */
	wmb();
	outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
	mb();				/* from now on */

	outw(0, iobase+VLSI_PIO_IRENABLE);
	wmb();

	vlsi_stop_clock(pdev);

	vlsi_unset_clock(pdev);

	free_irq(ndev->irq,ndev);

	/* free all per-slot transfer buffers; unallocated slots have NULL head */
	if (idev->ring_buf) {
		for (i = 0; i < 2*MAX_RING_DESCR; i++) {
			if (idev->ring_buf[i].head)
				kfree(idev->ring_buf[i].head);
		}
		kfree(idev->ring_buf);
	}

	if (idev->busaddr)
		pci_free_consistent(idev->pdev,RING_AREA_SIZE,idev->ring_hw,idev->busaddr);

	idev->ring_buf = NULL;
	idev->ring_hw = NULL;
	idev->busaddr = 0;

	/* stop the device from bus-mastering DMA */
	pci_read_config_byte(pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_byte(pdev, PCI_COMMAND, cmd);

	pci_release_regions(pdev);

	printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);

	MOD_DEC_USE_COUNT;
	return 0;
}
897
898 static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
899 {
900         vlsi_irda_dev_t *idev = ndev->priv;
901
902         return &idev->stats;
903 }
904
905 static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
906 {
907         vlsi_irda_dev_t *idev = ndev->priv;
908         unsigned long flags;
909         int iobase;
910         u8 status;
911         u16 config;
912         int mtt;
913         int entry;
914         int len, speed;
915
916
917         iobase = ndev->base_addr;
918
919         speed = irda_get_next_speed(skb);
920
921         if (speed != -1  &&  speed != idev->baud) {
922                 idev->new_baud = speed;
923                 if (!skb->len) {
924                         dev_kfree_skb(skb);
925                         vlsi_set_baud(ndev);
926                         return 0;
927                 }
928                 status = TX_STAT_CLRENTX;  /* stop tx-ring after this frame */
929         }
930         else
931                 status = 0;
932
933
934         spin_lock_irqsave(&idev->lock,flags);
935
936         entry = idev->tx_put;
937
938         if (idev->mode == IFF_SIR) {
939                 status |= TX_STAT_DISCRC;
940                 len = async_wrap_skb(skb, idev->ring_buf[entry].head,
941                         XFER_BUF_SIZE);
942         }
943         else {                          /* hw deals with MIR/FIR mode */
944                 len = skb->len;
945                 memcpy(idev->ring_buf[entry].head, skb->data, len);
946         }
947
948         if (len == 0)
949                 printk(KERN_ERR "%s: sending 0-size packet???\n",
950                         __FUNCTION__);
951
952         status |= RD_STAT_ACTIVE;
953
954         rd_set_count(idev->ring_hw+entry, len);
955         rd_set_status(idev->ring_hw+entry, status);
956         ring_ptr_inc(&idev->tx_put, idev->tx_mask);
957
958         dev_kfree_skb(skb);     
959
960 #if 0
961         printk(KERN_DEBUG "%s: dump entry %d: %u %02x %08x\n",
962                 __FUNCTION__, entry,
963                 idev->ring_hw[entry].rd_count,
964                 (unsigned)idev->ring_hw[entry].rd_status,
965                 idev->ring_hw[entry].rd_addr & 0xffffffff);
966         vlsi_reg_debug(iobase,__FUNCTION__);
967 #endif
968
969 /*
970  *      race window due to concurrent controller processing!
971  *
972  *      we may loose ENTX at any time when the controller
973  *      fetches an inactive descr or one with CLR_ENTX set.
974  *      therefore we only rely on the controller telling us
975  *      tx is already stopped because (cannot restart without PROMPT).
976  *      instead we depend on the tx-complete-isr to detect the
977  *      false negatives and retrigger the tx ring.
978  *      that's why we need interrupts disabled till tx has been
979  *      kicked, so the tx-complete-isr was either already finished
980  *      before we've put the new active descriptor on the ring - or
981  *      the isr will be called after the new active descr is on the
982  *      ring _and_ the ring was prompted. Making these two steps
983  *      atomic allows to resolve the race.
984  */
985
986         iobase = ndev->base_addr;
987
988         if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
989
990                 mtt = irda_get_mtt(skb);
991                 if (mtt) {
992                         udelay(mtt);            /* ### FIXME ... */
993                 }
994
995                 outw(0, iobase+VLSI_PIO_IRENABLE);
996
997                 config = inw(iobase+VLSI_PIO_IRCFG);
998                 rmb();
999                 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
1000                 wmb();
1001                 outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
1002
1003                 mb();
1004
1005                 outw(0, iobase+VLSI_PIO_PROMPT);
1006                 wmb();
1007         }
1008
1009         spin_unlock_irqrestore(&idev->lock, flags);
1010
1011         if (idev->tx_put == idev->tx_get) {
1012                 netif_stop_queue(ndev);
1013                 printk(KERN_DEBUG "%s: tx ring full - queue stopped: %d/%d\n",
1014                         __FUNCTION__, idev->tx_put, idev->tx_get);
1015                 entry = idev->tx_get;
1016                 printk(KERN_INFO "%s: dump stalled entry %d: %u %02x %08x\n",
1017                         __FUNCTION__, entry,
1018                         idev->ring_hw[entry].rd_count,
1019                         (unsigned)idev->ring_hw[entry].rd_status,
1020                         idev->ring_hw[entry].rd_addr & 0xffffffff);
1021                 vlsi_reg_debug(iobase,__FUNCTION__);
1022         }
1023
1024 //      vlsi_reg_debug(iobase, __FUNCTION__);
1025
1026         return 0;
1027 }
1028
1029
1030 static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1031 {
1032         vlsi_irda_dev_t *idev = ndev->priv;
1033         struct if_irda_req *irq = (struct if_irda_req *) rq;
1034         unsigned long flags;
1035         u16 fifocnt;
1036         int ret = 0;
1037
1038         spin_lock_irqsave(&idev->lock,flags);
1039         switch (cmd) {
1040                 case SIOCSBANDWIDTH:
1041                         if (!capable(CAP_NET_ADMIN)) {
1042                                 ret = -EPERM;
1043                                 break;
1044                         }
1045                         idev->new_baud = irq->ifr_baudrate;
1046                         break;
1047                 case SIOCSMEDIABUSY:
1048                         if (!capable(CAP_NET_ADMIN)) {
1049                                 ret = -EPERM;
1050                                 break;
1051                         }
1052                         irda_device_set_media_busy(ndev, TRUE);
1053                         break;
1054                 case SIOCGRECEIVING:
1055                         fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1056                         irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
1057                         break;
1058                 default:
1059                         printk(KERN_ERR "%s: notsupp - cmd=%04x\n",
1060                                 __FUNCTION__, cmd);
1061                         ret = -EOPNOTSUPP;
1062         }       
1063         spin_unlock_irqrestore(&idev->lock,flags);
1064         
1065         return ret;
1066 }
1067
1068
1069
/*
 * vlsi_irda_init - netdevice init() hook, invoked from register_netdev().
 *
 * Configures PCI busmastering within the controller's DMA addressing
 * restrictions, disables the unused legacy UART address decoding,
 * registers the supported QoS bits with the irda stack and installs
 * the netdevice method pointers.
 * Returns 0 on success, -1 if the platform cannot satisfy the
 * controller's DMA mask.
 */
int vlsi_irda_init(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct pci_dev *pdev = idev->pdev;
	u8	byte;


	SET_MODULE_OWNER(ndev);

	ndev->irq = pdev->irq;
	ndev->base_addr = pci_resource_start(pdev,0);

	/* PCI busmastering - see include file for details! */

	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)) {
		printk(KERN_ERR "%s: aborting due to PCI BM-DMA address limitations\n",
			__FUNCTION__);
		return -1;
	}
	pci_set_master(pdev);

	/* once the hw mask was accepted, narrow the effective mask to the
	 * range reachable through the MSTRPAGE window (see include file) */
	pdev->dma_mask = DMA_MASK_MSTRPAGE;
	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);

	/* we don't use the legacy UART, disable its address decoding */

	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);


	irda_init_max_qos_capabilies(&idev->qos);

	/* the VLSI82C147 does not support 576000! */

	idev->qos.baud_rate.bits = IR_2400 | IR_9600
		| IR_19200 | IR_38400 | IR_57600 | IR_115200
		| IR_1152000 | (IR_4000000 << 8);

	idev->qos.min_turn_time.bits = mtt_bits;

	irda_qos_bits_to_value(&idev->qos);

	irda_device_setup(ndev);

	/* currently no media definitions for SIR/MIR/FIR */

	ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
	ndev->if_port = IF_PORT_UNKNOWN;
 
	ndev->open            = vlsi_open;
	ndev->stop            = vlsi_close;
	ndev->get_stats       = vlsi_get_stats;
	ndev->hard_start_xmit = vlsi_hard_start_xmit;
	ndev->do_ioctl        = vlsi_ioctl;

	return 0;
}	
1128
1129 /**************************************************************/
1130
1131 static int __devinit
1132 vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1133 {
1134         struct net_device       *ndev;
1135         vlsi_irda_dev_t         *idev;
1136         int                     alloc_size;
1137
1138         printk(KERN_INFO "%s: found IrDA PCI controler %s\n", drivername, pdev->name);
1139
1140         if (pci_enable_device(pdev))
1141                 goto out;
1142
1143         if ( !pci_resource_start(pdev,0)
1144              || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
1145                 printk(KERN_ERR "%s: bar 0 invalid", __FUNCTION__);
1146                 goto out;
1147         }
1148
1149         alloc_size = sizeof(*ndev) + sizeof(*idev);
1150
1151         ndev = (struct net_device *) kmalloc (alloc_size, GFP_KERNEL);
1152         if (ndev==NULL) {
1153                 printk(KERN_ERR "%s: Unable to allocate device memory.\n",
1154                         __FUNCTION__);
1155                 goto out;
1156         }
1157
1158         memset(ndev, 0, alloc_size);
1159
1160         idev = (vlsi_irda_dev_t *) (ndev + 1);
1161         ndev->priv = (void *) idev;
1162
1163         spin_lock_init(&idev->lock);
1164         idev->pdev = pdev;
1165         ndev->init = vlsi_irda_init;
1166         strcpy(ndev->name,"irda%d");
1167         if (register_netdev(ndev)) {
1168                 printk(KERN_ERR "%s: register_netdev failed\n",
1169                         __FUNCTION__);
1170                 goto out_freedev;
1171         }
1172         printk(KERN_INFO "%s: registered device %s\n", drivername, ndev->name);
1173
1174         pdev->driver_data = ndev;
1175
1176         return 0;
1177
1178 out_freedev:
1179         kfree(ndev);
1180 out:
1181         pdev->driver_data = NULL;
1182         return -ENODEV;
1183 }
1184
1185 static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
1186 {
1187         struct net_device *ndev = pdev->driver_data;
1188
1189         if (ndev) {
1190                 printk(KERN_INFO "%s: unregister device %s\n",
1191                         drivername, ndev->name);
1192
1193                 unregister_netdev(ndev);
1194                 /* do not free - async completed by unregister_netdev()
1195                  * ndev->destructor called (if present) when going to free
1196                  */
1197
1198         }
1199         else
1200                 printk(KERN_CRIT "%s: lost netdevice?\n", drivername);
1201         pdev->driver_data = NULL;
1202
1203         pci_disable_device(pdev);
1204         printk(KERN_INFO "%s: %s disabled\n", drivername, pdev->name);
1205 }
1206
/*
 * vlsi_irda_suspend - PCI power management suspend hook.
 * Power management is not implemented; this stub only logs the call
 * (at KERN_ERR so it is hard to miss) and claims success.
 */
static int vlsi_irda_suspend(struct pci_dev *pdev, u32 state)
{
	printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
	return 0;
}
1212
/*
 * vlsi_irda_resume - PCI power management resume hook.
 * Power management is not implemented; this stub only logs the call
 * and claims success (the device is NOT reinitialized).
 */
static int vlsi_irda_resume(struct pci_dev *pdev)
{
	printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
	return 0;
}
1218
1219 /*********************************************************/
1220
1221 static struct pci_driver vlsi_irda_driver = {
1222         name:           drivername,
1223         id_table:       vlsi_irda_table,
1224         probe:          vlsi_irda_probe,
1225         remove:         vlsi_irda_remove,
1226         suspend:        vlsi_irda_suspend,
1227         resume:         vlsi_irda_resume,
1228 };
1229
1230 static int __init vlsi_mod_init(void)
1231 {
1232         if (clksrc < 0  ||  clksrc > 3) {
1233                 printk(KERN_ERR "%s: invalid clksrc=%d\n", __FUNCTION__, clksrc);
1234                 return -1;
1235         }
1236         if ( ringsize[0]==0  ||  (ringsize[0] & ~(64|32|16|8|4))
1237              ||  ((ringsize[0]-1)&ringsize[0])) {
1238                 printk(KERN_INFO "%s: invalid tx ringsize %d - using default=16\n",
1239                         __FUNCTION__, ringsize[0]);
1240                 ringsize[0] = 16;
1241         } 
1242         if ( ringsize[1]==0  ||  (ringsize[1] & ~(64|32|16|8|4))
1243              ||  ((ringsize[1]-1)&ringsize[1])) {
1244                 printk(KERN_INFO "%s: invalid rx ringsize %d - using default=16\n",
1245                         __FUNCTION__, ringsize[1]);
1246                 ringsize[1] = 16;
1247         }
1248         sirpulse = !!sirpulse;
1249         return pci_module_init(&vlsi_irda_driver);
1250 }
1251
/* vlsi_mod_exit - module unload: deregister the PCI driver. */
static void __exit vlsi_mod_exit(void)
{
	pci_unregister_driver(&vlsi_irda_driver);
}

/* register module entry/exit points with the kernel */
module_init(vlsi_mod_init);
module_exit(vlsi_mod_exit);