1 /*
2  * ti_pcilynx.c - Texas Instruments PCILynx driver
3  * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4  *                         Stephan Linz <linz@mazet.de>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/wait.h>
26 #include <linux/errno.h>
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/pci.h>
30 #include <linux/fs.h>
31 #include <linux/poll.h>
32 #include <asm/byteorder.h>
33 #include <asm/atomic.h>
34 #include <asm/io.h>
35 #include <asm/uaccess.h>
36
37 #include "ieee1394.h"
38 #include "ieee1394_types.h"
39 #include "hosts.h"
40 #include "ieee1394_core.h"
41 #include "pcilynx.h"
42
43
44 #if MAX_PCILYNX_CARDS > PCILYNX_MINOR_ROM_START
45 #error Max number of cards is bigger than PCILYNX_MINOR_ROM_START - this does not work.
46 #endif
47
48 /* print general (card independent) information */
49 #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
50 /* print card specific information */
51 #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
52
53 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
54 #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
55 #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
56 #else
57 #define PRINT_GD(level, fmt, args...) do {} while (0)
58 #define PRINTD(level, card, fmt, args...) do {} while (0)
59 #endif
60
61
62 static struct ti_lynx cards[MAX_PCILYNX_CARDS];
63 static int num_of_cards = 0;
64 static struct hpsb_host_template lynx_template;
65
66 /*
67  * PCL handling functions.
68  */
69
70 static pcl_t alloc_pcl(struct ti_lynx *lynx)
71 {
72         u8 m;
73         int i, j;
74
75         spin_lock(&lynx->lock);
76         /* FIXME - use ffz() to make this readable */
77         for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
78                 m = lynx->pcl_bmap[i];
79                 for (j = 0; j < 8; j++) {
80                         if (m & 1<<j) {
81                                 continue;
82                         }
83                         m |= 1<<j;
84                         lynx->pcl_bmap[i] = m;
85                         spin_unlock(&lynx->lock);
86                         return 8 * i + j;
87                 }
88         }
89         spin_unlock(&lynx->lock);
90
91         return -1;
92 }
93
94
95 #if 0
96 static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
97 {
98         int off, bit;
99
100         off = pclid / 8;
101         bit = pclid % 8;
102
103         if (pclid < 0) {
104                 return;
105         }
106
107         spin_lock(&lynx->lock);
108         if (lynx->pcl_bmap[off] & 1<<bit) {
109                 lynx->pcl_bmap[off] &= ~(1<<bit);
110         } else {
111                 PRINT(KERN_ERR, lynx->id, 
112                       "attempted to free unallocated PCL %d", pclid);
113         }
114         spin_unlock(&lynx->lock);
115 }
116
117 /* functions useful for debugging */        
118 static void pretty_print_pcl(const struct ti_pcl *pcl)
119 {
120         int i;
121
122         printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
123                pcl->next, pcl->user_data, pcl->pcl_status, 
124                pcl->remaining_transfer_count, pcl->next_data_buffer);
125
126         printk("PCL");
127         for (i=0; i<13; i++) {
128                 printk(" c%x:%08x d%x:%08x",
129                        i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
130                 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
131         }
132         printk("\n");
133 }
134         
135 static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
136 {
137         struct ti_pcl pcl;
138
139         get_pcl(lynx, pclid, &pcl);
140         pretty_print_pcl(&pcl);
141 }
142 #endif
143
144
145 static int add_card(struct pci_dev *dev, const struct pci_device_id *devid);
146 static void remove_card(struct pci_dev *dev);
147
148
149
150 /***********************************
151  * IEEE-1394 functionality section *
152  ***********************************/
153
154
155 static int get_phy_reg(struct ti_lynx *lynx, int addr)
156 {
157         int retval;
158         int i = 0;
159
160         unsigned long flags;
161
162         if (addr > 15) {
163                 PRINT(KERN_ERR, lynx->id, __FUNCTION__
164                       ": PHY register address %d out of range", addr);
165                 return -1;
166         }
167
168         spin_lock_irqsave(&lynx->phy_reg_lock, flags);
169
170         reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
171         do {
172                 retval = reg_read(lynx, LINK_PHY);
173
174                 if (i > 10000) {
175                         PRINT(KERN_ERR, lynx->id, __FUNCTION__ 
176                               ": runaway loop, aborting");
177                         retval = -1;
178                         break;
179                 }
180                 i++;
181         } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
182
183         reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
184         spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
185
186         if (retval != -1) {
187                 return retval & 0xff;
188         } else {
189                 return -1;
190         }
191 }
192
193 static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
194 {
195         unsigned long flags;
196
197         if (addr > 15) {
198                 PRINT(KERN_ERR, lynx->id, __FUNCTION__
199                       ": PHY register address %d out of range", addr);
200                 return -1;
201         }
202
203         if (val > 0xff) {
204                 PRINT(KERN_ERR, lynx->id, __FUNCTION__
205                       ": PHY register value %d out of range", val);
206                 return -1;
207         }
208
209         spin_lock_irqsave(&lynx->phy_reg_lock, flags);
210
211         reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
212                   | LINK_PHY_WDATA(val));
213
214         spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
215
216         return 0;
217 }
218
219 static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
220 {
221         int reg;
222
223         if (page > 7) {
224                 PRINT(KERN_ERR, lynx->id, __FUNCTION__
225                       ": PHY page %d out of range", page);
226                 return -1;
227         }
228
229         reg = get_phy_reg(lynx, 7);
230         if (reg != -1) {
231                 reg &= 0x1f;
232                 reg |= (page << 5);
233                 set_phy_reg(lynx, 7, reg);
234                 return 0;
235         } else {
236                 return -1;
237         }
238 }
239
240 #if 0 /* not needed at this time */
241 static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
242 {
243         int reg;
244
245         if (port > 15) {
246                 PRINT(KERN_ERR, lynx->id, __FUNCTION__
247                       ": PHY port %d out of range", port);
248                 return -1;
249         }
250
251         reg = get_phy_reg(lynx, 7);
252         if (reg != -1) {
253                 reg &= 0xf0;
254                 reg |= port;
255                 set_phy_reg(lynx, 7, reg);
256                 return 0;
257         } else {
258                 return -1;
259         }
260 }
261 #endif
262
263 static u32 get_phy_vendorid(struct ti_lynx *lynx)
264 {
265         u32 pvid = 0;
266         sel_phy_reg_page(lynx, 1);
267         pvid |= (get_phy_reg(lynx, 10) << 16);
268         pvid |= (get_phy_reg(lynx, 11) << 8);
269         pvid |= get_phy_reg(lynx, 12);
270         PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
271         return pvid;
272 }
273
274 static u32 get_phy_productid(struct ti_lynx *lynx)
275 {
276         u32 id = 0;
277         sel_phy_reg_page(lynx, 1);
278         id |= (get_phy_reg(lynx, 13) << 16);
279         id |= (get_phy_reg(lynx, 14) << 8);
280         id |= get_phy_reg(lynx, 15);
281         PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
282         return id;
283 }
284
285 static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
286                                      struct hpsb_host *host)
287 {
288         quadlet_t lsid;
289         char phyreg[7];
290         int i;
291
292         phyreg[0] = lynx->phy_reg0;
293         for (i = 1; i < 7; i++) {
294                 phyreg[i] = get_phy_reg(lynx, i);
295         }
296
297         /* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
298            more than 3 ports on the PHY anyway. */
299
300         lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
301         lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
302         lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
303         lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
304         /* lsid |= 1 << 11; *//* set contender (hack) */
305         lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
306
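        /* Encode two bits per port: 01 for an unconnected port, 10 or 11 for a
         * connected one (the low bit is taken from bit 3 of that port's PHY
         * status register). */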
307         for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
308                 if (phyreg[3 + i] & 0x4) {
309                         lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
310                                 << (6 - i*2);
311                 } else {
312                         lsid |= 1 << (6 - i*2);
313                 }
314         }
315
316         cpu_to_be32s(&lsid);
317         PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
318         return lsid;
319 }
320
321 static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
322 {
323         quadlet_t *q = lynx->rcv_page;
324         int phyid, isroot, size;
325         quadlet_t lsid = 0;
326         int i;
327
328         if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
329
330         size = lynx->selfid_size;
331         phyid = lynx->phy_reg0;
332
333         i = (size > 16 ? 16 : size) / 4 - 1;
334         while (i >= 0) {
335                 cpu_to_be32s(&q[i]);
336                 i--;
337         }
338         
339         if (!lynx->phyic.reg_1394a) {
340                 lsid = generate_own_selfid(lynx, host);
341         }
342
343         isroot = (phyid & 2) != 0;
344         phyid >>= 2;
345         PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
346               phyid, (isroot ? "root" : "not root"));
347         reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
348
349         if (!lynx->phyic.reg_1394a && !size) {
350                 hpsb_selfid_received(host, lsid);
351         }
352
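        /* Walk the received self-ID data; each packet occupies 8 bytes: the
         * self-ID quadlet followed by its bit-inverted check quadlet. */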
353         while (size > 0) {
354                 struct selfid *sid = (struct selfid *)q;
355
356                 if (!lynx->phyic.reg_1394a && !sid->extended 
357                     && (sid->phy_id == (phyid + 1))) {
358                         hpsb_selfid_received(host, lsid);
359                 }
360
361                 if (q[0] == ~q[1]) {
362                         PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
363                               q[0]);
364                         hpsb_selfid_received(host, q[0]);
365                 } else {
366                         PRINT(KERN_INFO, lynx->id,
367                               "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
368                 }
369                 q += 2;
370                 size -= 8;
371         }
372
373         if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
374                 hpsb_selfid_received(host, lsid);
375         }
376
377         hpsb_selfid_complete(host, phyid, isroot);
378
379         if (host->in_bus_reset) return;
380
381         if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER);
382         reg_set_bits(lynx, LINK_CONTROL,
383                      LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
384                      | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
385 }
386
387
388
389 /* This must be called with the respective queue_lock held. */
390 static void send_next(struct ti_lynx *lynx, int what)
391 {
392         struct ti_pcl pcl;
393         struct lynx_send_data *d;
394         struct hpsb_packet *packet;
395
396         d = (what == iso ? &lynx->iso_send : &lynx->async);
397         packet = d->queue;
398
399         d->header_dma = pci_map_single(lynx->dev, packet->header,
400                                        packet->header_size, PCI_DMA_TODEVICE);
401         if (packet->data_size) {
402                 d->data_dma = pci_map_single(lynx->dev, packet->data,
403                                              packet->data_size,
404                                              PCI_DMA_TODEVICE);
405         } else {
406                 d->data_dma = 0;
407         }
408
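        /* Build the transmit PCL: buffer 0 carries the packet header, buffer 1
         * the (possibly empty) data payload. */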
409         pcl.next = PCL_NEXT_INVALID;
410         pcl.async_error_next = PCL_NEXT_INVALID;
411 #ifdef __BIG_ENDIAN
412         pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
413 #else
414         pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size 
415                 | PCL_BIGENDIAN;
416 #endif
417         pcl.buffer[0].pointer = d->header_dma;
418         pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
419         pcl.buffer[1].pointer = d->data_dma;
420
421         switch (packet->type) {
422         case async:
423                 pcl.buffer[0].control |= PCL_CMD_XMT;
424                 break;
425         case iso:
426                 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
427                 break;
428         case raw:
429                 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
430                 break;
431         }                
432
433         if (!packet->data_be) {
434                 pcl.buffer[1].control |= PCL_BIGENDIAN;
435         }
436
437         put_pcl(lynx, d->pcl, &pcl);
438         run_pcl(lynx, d->pcl_start, d->channel);
439 }
440
441
442 #if 0
443 static int lynx_detect(struct hpsb_host_template *tmpl)
444 {
445         struct hpsb_host *host;
446         int i;
447
448         init_driver();
449
450         for (i = 0; i < num_of_cards; i++) {
451                 host = hpsb_get_host(tmpl, 0);
452                 if (host == NULL) {
453                         /* simply don't init any more after running out of memory */
454                         return i;
455                 }
456                 host->hostdata = &cards[i];
457                 cards[i].host = host;
458         }
459
460         return num_of_cards;
461 }
462 #endif
463
464 static int lynx_initialize(struct hpsb_host *host)
465 {
466         struct ti_lynx *lynx = host->hostdata;
467         struct ti_pcl pcl;
468         int i;
469         u32 *pcli;
470
471         lynx->selfid_size = -1;
472         lynx->phy_reg0 = -1;
473
474         lynx->async.queue = NULL;
475         
476         pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
477         put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
478         
479         pcl.next = PCL_NEXT_INVALID;
480         pcl.async_error_next = PCL_NEXT_INVALID;
481 #ifdef __BIG_ENDIAN
482         pcl.buffer[0].control = PCL_CMD_RCV | 16;
483         pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
484 #else
485         pcl.buffer[0].control = PCL_CMD_RCV | PCL_BIGENDIAN | 16;
486         pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
487 #endif
488         pcl.buffer[0].pointer = lynx->rcv_page_dma;
489         pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
490         put_pcl(lynx, lynx->rcv_pcl, &pcl);
491         
492         pcl.next = pcl_bus(lynx, lynx->async.pcl);
493         pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
494         put_pcl(lynx, lynx->async.pcl_start, &pcl);
495
496         pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
497         pcl.async_error_next = PCL_NEXT_INVALID;
498         put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
499
500         pcl.next = PCL_NEXT_INVALID;
501         pcl.async_error_next = PCL_NEXT_INVALID;
502         pcl.buffer[0].control = PCL_CMD_RCV | 4;
503 #ifndef __BIG_ENDIAN
504         pcl.buffer[0].control |= PCL_BIGENDIAN;
505 #endif
506         pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
507
508         for (i = 0; i < NUM_ISORCV_PCL; i++) {
509                 int page = i / ISORCV_PER_PAGE;
510                 int sec = i % ISORCV_PER_PAGE;
511
512                 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page] 
513                         + sec * MAX_ISORCV_SIZE;
514                 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
515                 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
516         }
517
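        /* Reuse the PCL structure as a plain table of bus addresses: the start
         * PCL's first words point at the individual iso receive PCLs, and
         * run_sub_pcl() picks one of them by index. */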
518         pcli = (u32 *)&pcl;
519         for (i = 0; i < NUM_ISORCV_PCL; i++) {
520                 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
521         }
522         put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
523
524         /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
525         reg_write(lynx, FIFO_SIZES, 0x003030a0);
526         /* 20 byte threshold before triggering PCI transfer */
527         reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
528         /* threshold on both send FIFOs before transmitting:
529            FIFO size - cache line size - 1 */
530         i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
531         i = 0x30 - i - 1;
532         reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
533         
534         reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
535
536         reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
537                   | LINK_INT_PHY_REG_RCVD  | LINK_INT_PHY_BUSRESET
538                   | LINK_INT_ISO_STUCK     | LINK_INT_ASYNC_STUCK 
539                   | LINK_INT_SENT_REJECT   | LINK_INT_TX_INVALID_TC
540                   | LINK_INT_GRF_OVERFLOW  | LINK_INT_ITF_UNDERFLOW
541                   | LINK_INT_ATF_UNDERFLOW);
542         
543         reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
544         reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
545         reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
546         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
547                   DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
548                   | DMA_WORD1_CMP_MATCH_EXACT    | DMA_WORD1_CMP_MATCH_BUS_BCAST
549                   | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
550
551         run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
552
553         reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
554         reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
555         reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
556         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
557
558         run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
559
560         reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
561                   | LINK_CONTROL_TX_ISO_EN   | LINK_CONTROL_RX_ISO_EN
562                   | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
563                   | LINK_CONTROL_RESET_TX    | LINK_CONTROL_RESET_RX);
564
565         if (!lynx->phyic.reg_1394a) {
566                 /* attempt to enable contender bit -FIXME- would this work
567                  * elsewhere? */
568                 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
569                 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1); 
570         } else {
571                 /* set the contender bit in the extended PHY register
572                  * set. (Should check that bits 5-7 (=0xE0) are set
573                  * in register 2?)
574                  */
575                 i = get_phy_reg(lynx, 4);
576                 if (i != -1) set_phy_reg(lynx, 4, i | 0x40);
577         }
578
579         return 1;
580 }
581
582 static void lynx_release(struct hpsb_host *host)
583 {
584         struct ti_lynx *lynx;
585         
586         if (host != NULL) {
587                 lynx = host->hostdata;
588                 remove_card(lynx->dev);
589         } else {
590 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
591                 unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
592 #endif
593         }
594 }
595
596 static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
597 {
598         struct ti_lynx *lynx = host->hostdata;
599         struct lynx_send_data *d;
600         unsigned long flags;
601
602         if (packet->data_size >= 4096) {
603                 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
604                       packet->data_size);
605                 return 0;
606         }
607
608         switch (packet->type) {
609         case async:
610         case raw:
611                 d = &lynx->async;
612                 break;
613         case iso:
614                 d = &lynx->iso_send;
615                 break;
616         default:
617                 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
618                       packet->type);
619                 return 0;
620         }
621
622         packet->xnext = NULL;
623         if (packet->tcode == TCODE_WRITEQ
624             || packet->tcode == TCODE_READQ_RESPONSE) {
625                 cpu_to_be32s(&packet->header[3]);
626         }
627
628         spin_lock_irqsave(&d->queue_lock, flags);
629
630         if (d->queue == NULL) {
631                 d->queue = packet;
632                 d->queue_last = packet;
633                 send_next(lynx, packet->type);
634         } else {
635                 d->queue_last->xnext = packet;
636                 d->queue_last = packet;
637         }
638
639         spin_unlock_irqrestore(&d->queue_lock, flags);
640
641         return 1;
642 }
643
644 static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
645 {
646         struct ti_lynx *lynx = host->hostdata;
647         int retval = 0;
648         struct hpsb_packet *packet, *lastpacket;
649         unsigned long flags;
650
651         switch (cmd) {
652         case RESET_BUS:
653                 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
654                         retval = 0;
655                         break;
656                 }
657
658                 if (arg) {
659                         arg = 3 << 6;
660                 } else {
661                         arg = 1 << 6;
662                 }
663
664                 retval = get_phy_reg(lynx, 1);
665                 arg |= (retval == -1 ? 63 : retval);
666                 retval = 0;
667
668                 PRINT(KERN_INFO, lynx->id, "resetting bus on request%s",
669                       (host->attempt_root ? " and attempting to become root"
670                        : ""));
671
672                 lynx->selfid_size = -1;
673                 lynx->phy_reg0 = -1;
674                 set_phy_reg(lynx, 1, arg);
675                 break;
676
677         case GET_CYCLE_COUNTER:
678                 retval = reg_read(lynx, CYCLE_TIMER);
679                 break;
680                 
681         case SET_CYCLE_COUNTER:
682                 reg_write(lynx, CYCLE_TIMER, arg);
683                 break;
684
685         case SET_BUS_ID:
686                 reg_write(lynx, LINK_ID, 
687                           (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
688                 break;
689                 
690         case ACT_CYCLE_MASTER:
691                 if (arg) {
692                         reg_set_bits(lynx, LINK_CONTROL,
693                                      LINK_CONTROL_CYCMASTER);
694                 } else {
695                         reg_clear_bits(lynx, LINK_CONTROL,
696                                        LINK_CONTROL_CYCMASTER);
697                 }
698                 break;
699
700         case CANCEL_REQUESTS:
701                 spin_lock_irqsave(&lynx->async.queue_lock, flags);
702
703                 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
704                 packet = lynx->async.queue;
705                 lynx->async.queue = NULL;
706
707                 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
708
709                 while (packet != NULL) {
710                         lastpacket = packet;
711                         packet = packet->xnext;
712                         hpsb_packet_sent(host, lastpacket, ACKX_ABORTED);
713                 }
714
715                 break;
716
717         case MODIFY_USAGE:
718                 if (arg) {
719                         MOD_INC_USE_COUNT;
720                 } else {
721                         MOD_DEC_USE_COUNT;
722                 }
723                 break;
724
725         case ISO_LISTEN_CHANNEL:
726                 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
727                 
728                 if (lynx->iso_rcv.chan_count++ == 0) {
729                         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
730                                   DMA_WORD1_CMP_ENABLE_MASTER);
731                 }
732
733                 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
734                 break;
735
736         case ISO_UNLISTEN_CHANNEL:
737                 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
738
739                 if (--lynx->iso_rcv.chan_count == 0) {
740                         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
741                                   0);
742                 }
743
744                 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
745                 break;
746
747         default:
748                 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
749                 retval = -1;
750         }
751
752         return retval;
753 }
754
755
756 /***************************************
757  * IEEE-1394 functionality section END *
758  ***************************************/
759
760 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
761 /* VFS functions for local bus / aux device access.  Access to these
762  * is implemented as character devices instead of block devices
763  * because buffering is not wanted here.  Therefore llseek (from the
764  * VFS) can be used on these char devices with the obvious effect.
765  */
766 static int mem_open(struct inode*, struct file*);
767 static int mem_release(struct inode*, struct file*);
768 static unsigned int aux_poll(struct file*, struct poll_table_struct*);
769 static loff_t mem_llseek(struct file*, loff_t, int);
770 static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
771 static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
772
773
774 static struct file_operations aux_ops = {
775         OWNER_THIS_MODULE
776         read:           mem_read,
777         write:          mem_write,
778         poll:           aux_poll,
779         llseek:         mem_llseek,
780         open:           mem_open,
781         release:        mem_release,
782 };
783
784
785 static void aux_setup_pcls(struct ti_lynx *lynx)
786 {
787         struct ti_pcl pcl;
788
789         pcl.next = PCL_NEXT_INVALID;
790         pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
791         put_pcl(lynx, lynx->dmem_pcl, &pcl);
792 }
793
794 static int mem_open(struct inode *inode, struct file *file)
795 {
796         int cid = MINOR(inode->i_rdev);
797         enum { t_rom, t_aux, t_ram } type;
798         struct memdata *md;
799         
800         V22_COMPAT_MOD_INC_USE_COUNT;
801
802         if (cid < PCILYNX_MINOR_AUX_START) {
803                 /* just for completeness */
804                 V22_COMPAT_MOD_DEC_USE_COUNT;
805                 return -ENXIO;
806         } else if (cid < PCILYNX_MINOR_ROM_START) {
807                 cid -= PCILYNX_MINOR_AUX_START;
808                 if (cid >= num_of_cards || !cards[cid].aux_port) {
809                         V22_COMPAT_MOD_DEC_USE_COUNT;
810                         return -ENXIO;
811                 }
812                 type = t_aux;
813         } else if (cid < PCILYNX_MINOR_RAM_START) {
814                 cid -= PCILYNX_MINOR_ROM_START;
815                 if (cid >= num_of_cards || !cards[cid].local_rom) {
816                         V22_COMPAT_MOD_DEC_USE_COUNT;
817                         return -ENXIO;
818                 }
819                 type = t_rom;
820         } else {
821                 /* WARNING: Know what you are doing when opening RAM.
822                  * It is currently used inside the driver! */
823                 cid -= PCILYNX_MINOR_RAM_START;
824                 if (cid >= num_of_cards || !cards[cid].local_ram) {
825                         V22_COMPAT_MOD_DEC_USE_COUNT;
826                         return -ENXIO;
827                 }
828                 type = t_ram;
829         }
830
831         md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
832         if (md == NULL) {
833                 V22_COMPAT_MOD_DEC_USE_COUNT;
834                 return -ENOMEM;
835         }
836
837         md->lynx = &cards[cid];
838         md->cid = cid;
839
840         switch (type) {
841         case t_rom:
842                 md->type = rom;
843                 break;
844         case t_ram:
845                 md->type = ram;
846                 break;
847         case t_aux:
848                 atomic_set(&md->aux_intr_last_seen,
849                            atomic_read(&cards[cid].aux_intr_seen));
850                 md->type = aux;
851                 break;
852         }
853
854         file->private_data = md;
855
856         return 0;
857 }
858
859 static int mem_release(struct inode *inode, struct file *file)
860 {
861         struct memdata *md = (struct memdata *)file->private_data;
862
863         kfree(md);
864
865         V22_COMPAT_MOD_DEC_USE_COUNT;
866         return 0;
867 }
868
869 static unsigned int aux_poll(struct file *file, poll_table *pt)
870 {
871         struct memdata *md = (struct memdata *)file->private_data;
872         int cid = md->cid;
873         unsigned int mask;
874
875         /* reading and writing is always allowed */
876         mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
877
878         if (md->type == aux) {
879                 poll_wait(file, &cards[cid].aux_intr_wait, pt);
880
881                 if (atomic_read(&md->aux_intr_last_seen)
882                     != atomic_read(&cards[cid].aux_intr_seen)) {
883                         mask |= POLLPRI;
884                         atomic_inc(&md->aux_intr_last_seen);
885                 }
886         }
887
888         return mask;
889 }
890
891 loff_t mem_llseek(struct file *file, loff_t offs, int orig)
892 {
893         loff_t newoffs;
894
895         switch (orig) {
896         case 0:
897                 newoffs = offs;
898                 break;
899         case 1:
900                 newoffs = offs + file->f_pos;
901                 break;
902         case 2:
903                 newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
904                 break;
905         default:
906                 return -EINVAL;
907         }
908
909         if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
910
911         file->f_pos = newoffs;
912         return newoffs;
913 }
914
915 /*
916  * Do not DMA if count is too small, because that has a serious impact on
917  * performance - the value 2400 was found by experiment and may not work as
918  * well everywhere as it does here - use the mem_mindma module option to change it.
919  */
920 short mem_mindma = 2400;
921 MODULE_PARM(mem_mindma, "h");
922
923 static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
924                            int offset)
925 {
926         pcltmp_t pcltmp;
927         struct ti_pcl *pcl;
928         size_t retval;
929         int i;
930         DECLARE_WAITQUEUE(wait, current);
931
932         count &= ~3;
933         count = MIN(count, 53196);
934         retval = count;
935
936         if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
937             & DMA_CHAN_CTRL_BUSY) {
938                 PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
939         }
940
941         reg_write(md->lynx, LBUS_ADDR, md->type | offset);
942
943         pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
944         pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | MIN(count, 4092);
945         pcl->buffer[0].pointer = physbuf;
946         count -= 4092;
947
948         i = 0;
949         while (count > 0) {
950                 i++;
951                 pcl->buffer[i].control = MIN(count, 4092);
952                 pcl->buffer[i].pointer = physbuf + i * 4092;
953                 count -= 4092;
954         }
955         pcl->buffer[i].control |= PCL_LAST_BUFF;
956         commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
957
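        /* Queue ourselves on the completion wait queue before starting the
         * transfer, then sleep until the interrupt handler wakes us up or a
         * signal arrives. */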
958         set_current_state(TASK_INTERRUPTIBLE);
959         add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
960         run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
961
962         schedule();
963         while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
964                & DMA_CHAN_CTRL_BUSY) {
965                 if (signal_pending(current)) {
966                         retval = -EINTR;
967                         break;
968                 }
969                 schedule();
970         }
971
972         reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
973         remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
974
975         if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
976             & DMA_CHAN_CTRL_BUSY) {
977                 PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
978         }
979
980         return retval;
981 }
982
983 static ssize_t mem_read(struct file *file, char *buffer, size_t count,
984                         loff_t *offset)
985 {
986         struct memdata *md = (struct memdata *)file->private_data;
987         ssize_t bcount;
988         size_t alignfix;
989         int off = (int)*offset; /* avoid useless 64bit-arithmetic */
990         ssize_t retval;
991         void *membase;
992
993         if ((off + count) > PCILYNX_MAX_MEMORY + 1) {
994                 count = PCILYNX_MAX_MEMORY + 1 - off;
995         }
996         if (count == 0) {
997                 return 0;
998         }
999
1000
1001         switch (md->type) {
1002         case rom:
1003                 membase = md->lynx->local_rom;
1004                 break;
1005         case ram:
1006                 membase = md->lynx->local_ram;
1007                 break;
1008         case aux:
1009                 membase = md->lynx->aux_port;
1010                 break;
1011         default:
1012                 panic("pcilynx%d: unsupported md->type %d in " __FUNCTION__,
1013                       md->lynx->id, md->type);
1014         }
1015
1016         down(&md->lynx->mem_dma_mutex);
1017
1018         if (count < mem_mindma) {
1019                 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
1020                 goto out;
1021         }
1022
1023         bcount = count;
1024         alignfix = 4 - (off % 4);
1025         if (alignfix != 4) {
1026                 if (bcount < alignfix) {
1027                         alignfix = bcount;
1028                 }
1029                 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
1030                               alignfix);
1031                 if (bcount == alignfix) {
1032                         goto out;
1033                 }
1034                 bcount -= alignfix;
1035                 off += alignfix;
1036         }
1037
1038         while (bcount >= 4) {
1039                 retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
1040                                      + count - bcount, bcount, off);
1041                 if (retval < 0) { up(&md->lynx->mem_dma_mutex); return retval; }
1042
1043                 bcount -= retval;
1044                 off += retval;
1045         }
1046
1047         if (bcount) {
1048                 memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
1049                               membase+off, bcount);
1050         }
1051
1052  out:
1053         retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
1054         up(&md->lynx->mem_dma_mutex);
1055
1056         if (retval) return -EFAULT;
1057         *offset += count;
1058         return count;
1059 }
1060
1061
1062 static ssize_t mem_write(struct file *file, const char *buffer, size_t count, 
1063                          loff_t *offset)
1064 {
1065         struct memdata *md = (struct memdata *)file->private_data;
1066
1067         if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
1068                 count = PCILYNX_MAX_MEMORY+1 - *offset;
1069         }
1070         if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
1071                 return -ENOSPC;
1072         }
1073
1074         /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
1075         switch (md->type) {
1076         case aux:
1077                 copy_from_user(md->lynx->aux_port+(*offset), buffer, count);
1078                 break;
1079         case ram:
1080                 copy_from_user(md->lynx->local_ram+(*offset), buffer, count);
1081                 break;
1082         case rom:
1083                 /* the ROM may be writeable */
1084                 copy_from_user(md->lynx->local_rom+(*offset), buffer, count);
1085                 break;
1086         }
1087
1088         file->f_pos += count;
1089         return count;
1090 }
1091 #endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
1092
1093
1094 /********************************************************
1095  * Global stuff (interrupt handler, init/shutdown code) *
1096  ********************************************************/
1097
1098
1099 static void lynx_irq_handler(int irq, void *dev_id,
1100                              struct pt_regs *regs_are_unused)
1101 {
1102         struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
1103         struct hpsb_host *host = lynx->host;
1104         u32 intmask;
1105         u32 linkint;
1106
1107         linkint = reg_read(lynx, LINK_INT_STATUS);
1108         intmask = reg_read(lynx, PCI_INT_STATUS);
1109
1110         PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
1111                linkint);
1112
1113         if (!(intmask & PCI_INT_INT_PEND)) return;
1114
1115         reg_write(lynx, LINK_INT_STATUS, linkint);
1116         reg_write(lynx, PCI_INT_STATUS, intmask);
1117
1118 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1119         if (intmask & PCI_INT_AUX_INT) {
1120                 atomic_inc(&lynx->aux_intr_seen);
1121                 wake_up_interruptible(&lynx->aux_intr_wait);
1122         }
1123
1124         if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
1125                 wake_up_interruptible(&lynx->mem_dma_intr_wait);
1126         }
1127 #endif
1128
1129
1130         if (intmask & PCI_INT_1394) {
1131                 if (linkint & LINK_INT_PHY_TIMEOUT) {
1132                         PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
1133                 }
1134                 if (linkint & LINK_INT_PHY_BUSRESET) {
1135                         PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
1136                         lynx->selfid_size = -1;
1137                         lynx->phy_reg0 = -1;
1138                         if (!host->in_bus_reset)
1139                                 hpsb_bus_reset(host);
1140                 }
1141                 if (linkint & LINK_INT_PHY_REG_RCVD) {
1142                         u32 reg;
1143
1144                         spin_lock(&lynx->phy_reg_lock);
1145                         reg = reg_read(lynx, LINK_PHY);
1146                         spin_unlock(&lynx->phy_reg_lock);
1147
1148                         if (!host->in_bus_reset) {
1149                                 PRINT(KERN_INFO, lynx->id,
1150                                       "phy reg received without reset");
1151                         } else if (reg & 0xf00) {
1152                                 PRINT(KERN_INFO, lynx->id,
1153                                       "unsolicited phy reg %d received",
1154                                       (reg >> 8) & 0xf);
1155                         } else {
1156                                 lynx->phy_reg0 = reg & 0xff;
1157                                 handle_selfid(lynx, host);
1158                         }
1159                 }
1160                 if (linkint & LINK_INT_ISO_STUCK) {
1161                         PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
1162                 }
1163                 if (linkint & LINK_INT_ASYNC_STUCK) {
1164                         PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
1165                 }
1166                 if (linkint & LINK_INT_SENT_REJECT) {
1167                         PRINT(KERN_INFO, lynx->id, "sent reject");
1168                 }
1169                 if (linkint & LINK_INT_TX_INVALID_TC) {
1170                         PRINT(KERN_INFO, lynx->id, "invalid transaction code");
1171                 }
1172                 if (linkint & LINK_INT_GRF_OVERFLOW) {
1173                         /* flush FIFO if overflow happens during reset */
1174                         if (host->in_bus_reset)
1175                                 reg_write(lynx, FIFO_CONTROL,
1176                                           FIFO_CONTROL_GRF_FLUSH);
1177                         PRINT(KERN_INFO, lynx->id, "GRF overflow");
1178                 }
1179                 if (linkint & LINK_INT_ITF_UNDERFLOW) {
1180                         PRINT(KERN_INFO, lynx->id, "ITF underflow");
1181                 }
1182                 if (linkint & LINK_INT_ATF_UNDERFLOW) {
1183                         PRINT(KERN_INFO, lynx->id, "ATF underflow");
1184                 }
1185         }
1186
1187         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
1188                 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
1189
1190                 spin_lock(&lynx->iso_rcv.lock);
1191
1192                 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
1193                         reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
1194
1195                 lynx->iso_rcv.used++;
1196                 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
1197
1198                 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
1199                     || !lynx->iso_rcv.chan_count) {
1200                         PRINTD(KERN_DEBUG, lynx->id, "stopped");
1201                         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1202                 }
1203
1204                 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
1205                             CHANNEL_ISO_RCV);
1206
1207                 spin_unlock(&lynx->iso_rcv.lock);
1208
1209                 tasklet_schedule(&lynx->iso_rcv.tq);
1210         }
1211
1212         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
1213                 u32 ack;
1214                 struct hpsb_packet *packet;
1215                 
1216                 spin_lock(&lynx->async.queue_lock);
1217
1218                 ack = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_SEND));
1219                 packet = lynx->async.queue;
1220                 lynx->async.queue = packet->xnext;
1221
1222                 pci_unmap_single(lynx->dev, lynx->async.header_dma,
1223                                  packet->header_size, PCI_DMA_TODEVICE);
1224                 if (packet->data_size) {
1225                         pci_unmap_single(lynx->dev, lynx->async.data_dma,
1226                                          packet->data_size, PCI_DMA_TODEVICE);
1227                 }
1228
1229                 if (lynx->async.queue != NULL) {
1230                         send_next(lynx, async);
1231                 }
1232
1233                 spin_unlock(&lynx->async.queue_lock);
1234
1235                 if (ack & DMA_CHAN_STAT_SPECIALACK) {
1236                         ack = (ack >> 15) & 0xf;
1237                         PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1238                         ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1239                 } else {
1240                         ack = (ack >> 15) & 0xf;
1241                 }
1242                 
1243                 hpsb_packet_sent(host, packet, ack);
1244         }
1245
1246         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
1247                 struct hpsb_packet *packet;
1248
1249                 spin_lock(&lynx->iso_send.queue_lock);
1250
1251                 packet = lynx->iso_send.queue;
1252                 lynx->iso_send.queue = packet->xnext;
1253
1254                 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
1255                                  packet->header_size, PCI_DMA_TODEVICE);
1256                 if (packet->data_size) {
1257                         pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
1258                                          packet->data_size, PCI_DMA_TODEVICE);
1259                 }
1260
1261                 if (lynx->iso_send.queue != NULL) {
1262                         send_next(lynx, iso);
1263                 }
1264
1265                 spin_unlock(&lynx->iso_send.queue_lock);
1266
1267                 hpsb_packet_sent(host, packet, ACK_COMPLETE);
1268         }
1269
1270         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1271                 /* general receive DMA completed */
1272                 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1273
1274                 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1275                        stat & 0x1fff); 
1276
1277                 if (stat & DMA_CHAN_STAT_SELFID) {
1278                         lynx->selfid_size = stat & 0x1fff;
1279                         handle_selfid(lynx, host);
1280                 } else {
1281                         quadlet_t *q_data = lynx->rcv_page;
1282                         if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1283                             || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1284                                 cpu_to_be32s(q_data + 3);
1285                         }
1286                         hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
1287                 }
1288
1289                 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1290         }
1291 }
1292
1293 static void iso_rcv_bh(struct ti_lynx *lynx)
1294 {
1295         unsigned int idx;
1296         quadlet_t *data;
1297         unsigned long flags;
1298
1299         spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1300
1301         while (lynx->iso_rcv.used) {
1302                 idx = lynx->iso_rcv.last;
1303                 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1304
1305                 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1306                         + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
1307
1308                 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1309                         PRINT(KERN_ERR, lynx->id,
1310                               "iso length mismatch 0x%08x/0x%08x", *data,
1311                               lynx->iso_rcv.stat[idx]);
1312                 }
1313
1314                 if (lynx->iso_rcv.stat[idx] 
1315                     & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1316                         PRINT(KERN_INFO, lynx->id,
1317                               "iso receive error on %d to 0x%p", idx, data);
1318                 } else {
1319                         hpsb_packet_received(lynx->host, data,
1320                                              lynx->iso_rcv.stat[idx] & 0x1fff,
1321                                              0);
1322                 }
1323
1324                 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1325                 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1326                 lynx->iso_rcv.used--;
1327         }
1328
1329         if (lynx->iso_rcv.chan_count) {
1330                 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1331                           DMA_WORD1_CMP_ENABLE_MASTER);
1332         }
1333         spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1334 }
1335
1336
1337 static int __devinit add_card(struct pci_dev *dev,
1338                               const struct pci_device_id *devid)
1339 {
1340 #define FAIL(fmt, args...) do { \
1341         PRINT_G(KERN_ERR, fmt , ## args); \
1342         num_of_cards--; \
1343         remove_card(dev); \
1344         return -1; \
1345         } while (0)
1346
1347         struct ti_lynx *lynx; /* shortcut to currently handled device */
1348         unsigned int i;
1349
1350         if (num_of_cards == MAX_PCILYNX_CARDS) {
1351                 PRINT_G(KERN_WARNING, "cannot handle more than %d cards.  "
1352                         "Adjust MAX_PCILYNX_CARDS in pcilynx.h.",
1353                         MAX_PCILYNX_CARDS);
1354                 return -1;
1355         }
1356
1357         lynx = &cards[num_of_cards++];
1358
1359         if (pci_set_dma_mask(dev, 0xffffffff))
1360                 FAIL("DMA address limits not supported for PCILynx hardware %d",
1361                      lynx->id);
1362         if (pci_enable_device(dev))
1363                 FAIL("failed to enable PCILynx hardware %d", lynx->id);
1364         pci_set_master(dev);
1365
1366         lynx->host = hpsb_get_host(&lynx_template, 0);
1367         if (!lynx->host)
1368                 FAIL("failed to allocate host structure");
1369
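        /* lynx->state records how far initialization has progressed so that
         * remove_card() can unwind exactly the steps that were completed. */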
1370         lynx->state = have_host_struct;
1371
1372         lynx->id = num_of_cards-1;
1373         lynx->dev = dev;
1374         lynx->host->pdev = dev;
1375
1376         lynx->lock = SPIN_LOCK_UNLOCKED;
1377         lynx->phy_reg_lock = SPIN_LOCK_UNLOCKED;
1378
1379 #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1380         lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1381                                              &lynx->pcl_mem_dma);
1382
1383         if (lynx->pcl_mem != NULL) {
1384                 lynx->state = have_pcl_mem;
1385                 PRINT(KERN_INFO, lynx->id, 
1386                       "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1387                       lynx->pcl_mem);
1388         } else {
1389                 FAIL("failed to allocate PCL memory area");
1390         }
1391 #endif
1392
1393 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1394         lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
1395                                                     &lynx->mem_dma_buffer_dma);
1396         if (lynx->mem_dma_buffer == NULL) {
1397                 FAIL("failed to allocate DMA buffer for aux");
1398         }
1399         lynx->state = have_aux_buf;
1400 #endif
1401
1402         lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1403                                               &lynx->rcv_page_dma);
1404         if (lynx->rcv_page == NULL) {
1405                 FAIL("failed to allocate receive buffer");
1406         }
1407         lynx->state = have_1394_buffers;
1408
1409         for (i = 0; i < ISORCV_PAGES; i++) {
1410                 lynx->iso_rcv.page[i] =
1411                         pci_alloc_consistent(dev, PAGE_SIZE,
1412                                              &lynx->iso_rcv.page_dma[i]);
1413                 if (lynx->iso_rcv.page[i] == NULL) {
1414                         FAIL("failed to allocate iso receive buffers");
1415                 }
1416         }
1417
1418         lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1419                                           PCILYNX_MAX_REGISTER);
1420         lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1421         lynx->aux_port  = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1422         lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1423                                   PCILYNX_MAX_MEMORY);
1424         lynx->state = have_iomappings;
1425
1426         if (lynx->registers == NULL) {
1427                 FAIL("failed to remap registers - card not accessible");
1428         }
1429
1430 #ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1431         if (lynx->local_ram == NULL) {
1432                 FAIL("failed to remap local RAM which is required for "
1433                      "operation");
1434         }
1435 #endif
1436
1437         reg_write(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1438
1439         if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
1440                          PCILYNX_DRIVER_NAME, lynx)) {
1441                 PRINT(KERN_INFO, lynx->id, "allocated interrupt %d", dev->irq);
1442                 lynx->state = have_intr;
1443         } else {
1444                 FAIL("failed to allocate shared interrupt %d", dev->irq);
1445         }
1446
1447         /* alloc_pcl return values are not checked; the provided PCL space is
1448          * expected to be sufficient for these initial allocations */
1449 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1450         if (lynx->aux_port != NULL) {
1451                 lynx->dmem_pcl = alloc_pcl(lynx);
1452                 aux_setup_pcls(lynx);
1453                 sema_init(&lynx->mem_dma_mutex, 1);
1454         }
1455 #endif
1456         lynx->rcv_pcl = alloc_pcl(lynx);
1457         lynx->rcv_pcl_start = alloc_pcl(lynx);
1458         lynx->async.pcl = alloc_pcl(lynx);
1459         lynx->async.pcl_start = alloc_pcl(lynx);
1460         lynx->iso_send.pcl = alloc_pcl(lynx);
1461         lynx->iso_send.pcl_start = alloc_pcl(lynx);
1462
1463         for (i = 0; i < NUM_ISORCV_PCL; i++) {
1464                 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1465         }
1466         lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1467
1468         /* all allocations successful - simple init stuff follows */
1469
1470         reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1471
1472 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1473         reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT);
1474         init_waitqueue_head(&lynx->mem_dma_intr_wait);
1475         init_waitqueue_head(&lynx->aux_intr_wait);
1476 #endif
1477
1478         tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1479                      (unsigned long)lynx);
1480
1481         lynx->iso_rcv.lock = SPIN_LOCK_UNLOCKED;
1482
1483         lynx->async.queue_lock = SPIN_LOCK_UNLOCKED;
1484         lynx->async.channel = CHANNEL_ASYNC_SEND;
1485         lynx->iso_send.queue_lock = SPIN_LOCK_UNLOCKED;
1486         lynx->iso_send.channel = CHANNEL_ISO_SEND;
1487         
1488         PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1489               "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1490               lynx->local_ram, lynx->aux_port);
1491
1492         /* now, looking for PHY register set */
1493         if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1494                 lynx->phyic.reg_1394a = 1;
1495                 PRINT(KERN_INFO, lynx->id,
1496                       "found 1394a conformant PHY (using extended register set)");
1497                 lynx->phyic.vendor = get_phy_vendorid(lynx);
1498                 lynx->phyic.product = get_phy_productid(lynx);
1499         } else {
1500                 lynx->phyic.reg_1394a = 0;
1501                 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1502         }
1503
1504         return 0;
1505 #undef FAIL
1506 }
1507
1508 static void remove_card(struct pci_dev *dev)
1509 {
1510         struct ti_lynx *lynx;
1511         int i;
1512
1513         lynx = cards;
1514         while (lynx->dev != dev) lynx++;
1515
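        /* Unwind in reverse order of initialization; each case deliberately
         * falls through to the next. */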
1516         switch (lynx->state) {
1517         case have_intr:
1518                 reg_write(lynx, PCI_INT_ENABLE, 0);
1519                 free_irq(lynx->dev->irq, lynx);
1520         case have_iomappings:
1521                 reg_write(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1522                 iounmap(lynx->registers);
1523                 iounmap(lynx->local_rom);
1524                 iounmap(lynx->local_ram);
1525                 iounmap(lynx->aux_port);
1526         case have_1394_buffers:
1527                 for (i = 0; i < ISORCV_PAGES; i++) {
1528                         if (lynx->iso_rcv.page[i]) {
1529                                 pci_free_consistent(lynx->dev, PAGE_SIZE,
1530                                                     lynx->iso_rcv.page[i],
1531                                                     lynx->iso_rcv.page_dma[i]);
1532                         }
1533                 }
1534                 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1535                                     lynx->rcv_page_dma);
1536         case have_aux_buf:
1537 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1538                 pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
1539                                     lynx->mem_dma_buffer_dma);
1540 #endif
1541         case have_pcl_mem:
1542 #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1543                 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1544                                     lynx->pcl_mem_dma);
1545 #endif
1546         case have_host_struct:
1547                 /* FIXME - verify host freeing */
1548         case clear:;
1549                 /* do nothing - already freed */
1550         }
1551
1552         tasklet_kill(&lynx->iso_rcv.tq);
1553
1554         lynx->state = clear;
1555 }
1556
1557 #if 0
1558 static int init_driver()
1559 {
1560         struct pci_dev *dev = NULL;
1561         int success = 0;
1562
1563         if (num_of_cards) {
1564                 PRINT_G(KERN_DEBUG, __PRETTY_FUNCTION__ " called again");
1565                 return 0;
1566         }
1567
1568         PRINT_G(KERN_INFO, "looking for PCILynx cards");
1569
1570         while ((dev = pci_find_device(PCI_VENDOR_ID_TI,
1571                                       PCI_DEVICE_ID_TI_PCILYNX, dev)) 
1572                != NULL) {
1573                 if (add_card(dev) == 0) {
1574                         success = 1;
1575                 }
1576         }
1577
1578         if (success == 0) {
1579                 PRINT_G(KERN_WARNING, "no operable PCILynx cards found");
1580                 return -ENXIO;
1581         }
1582
1583 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1584         if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
1585                 PRINT_G(KERN_ERR, "allocation of char major number %d failed",
1586                         PCILYNX_MAJOR);
1587                 return -EBUSY;
1588         }
1589 #endif
1590
1591         return 0;
1592 }
1593 #endif
1594
1595
1596 static size_t get_lynx_rom(struct hpsb_host *host, const quadlet_t **ptr)
1597 {
1598         *ptr = lynx_csr_rom;
1599         return sizeof(lynx_csr_rom);
1600 }
1601
1602 static struct hpsb_host_template lynx_template = {
1603         name:             PCILYNX_DRIVER_NAME,
1604         initialize_host:  lynx_initialize,
1605         release_host:     lynx_release,
1606         get_rom:          get_lynx_rom,
1607         transmit_packet:  lynx_transmit,
1608         devctl:           lynx_devctl
1609 };
1610
1611 static struct pci_device_id pci_table[] __devinitdata = {
1612         {
1613                 vendor:     PCI_VENDOR_ID_TI,
1614                 device:     PCI_DEVICE_ID_TI_PCILYNX,
1615                 subvendor:  PCI_ANY_ID,
1616                 subdevice:  PCI_ANY_ID,
1617         },
1618         { }                     /* Terminating entry */
1619 };
1620
1621 static struct pci_driver lynx_pcidriver = {
1622         name:      PCILYNX_DRIVER_NAME,
1623         id_table:  pci_table,
1624         probe:     add_card,
1625         remove:    remove_card,
1626 };
1627
1628 MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1629 MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1630 MODULE_SUPPORTED_DEVICE("pcilynx");
1631 MODULE_DEVICE_TABLE(pci, pci_table);
1632
1633 static void __exit pcilynx_cleanup(void)
1634 {
1635         pci_unregister_driver(&lynx_pcidriver);
1636         hpsb_unregister_lowlevel(&lynx_template);
1637         PRINT_G(KERN_INFO, "removed " PCILYNX_DRIVER_NAME " module");
1638 }
1639
1640 static int __init pcilynx_init(void)
1641 {
1642         int ret;
1643
1644         if (hpsb_register_lowlevel(&lynx_template)) {
1645                 PRINT_G(KERN_ERR, "registering failed");
1646                 return -ENXIO;
1647         }
1648
1649         ret = pci_module_init(&lynx_pcidriver);
1650         if (ret < 0) {
1651                 PRINT_G(KERN_ERR, "PCI module init failed");
1652                 hpsb_unregister_lowlevel(&lynx_template);
1653         }
1654
1655         return ret;
1656 }
1657
1658 module_init(pcilynx_init);
1659 module_exit(pcilynx_cleanup);