v2.4.8 -> v2.4.8.1
[opensuse:kernel.git] / drivers / ieee1394 / raw1394.c
1 /*
2  * IEEE 1394 for Linux
3  *
4  * Raw interface to the bus
5  *
6  * Copyright (C) 1999, 2000 Andreas E. Bombe
7  *
8  * This code is licensed under the GPL.  See the file COPYING in the root
9  * directory of the kernel sources for details.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/slab.h>
16 #include <linux/fs.h>
17 #include <linux/poll.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/version.h>
21 #include <linux/smp_lock.h>
22 #include <asm/uaccess.h>
23 #include <asm/atomic.h>
24
25 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
26 #include <linux/devfs_fs_kernel.h>
27 #endif
28
29 #include "ieee1394.h"
30 #include "ieee1394_types.h"
31 #include "ieee1394_core.h"
32 #include "hosts.h"
33 #include "highlevel.h"
34 #include "ieee1394_transactions.h"
35 #include "raw1394.h"
36
37
/*
 * Convert between kernel pointers and the fixed 64-bit integers used in
 * struct raw1394_request, so the ABI is identical for 32- and 64-bit
 * kernels.  Arguments are fully parenthesized so expressions such as
 * int2ptr(a + b) expand correctly (the original omitted the parentheses).
 */
#if BITS_PER_LONG == 64
#define int2ptr(x) ((void *)(x))
#define ptr2int(x) ((u64)(x))
#else
#define int2ptr(x) ((void *)(u32)(x))
#define ptr2int(x) ((u64)(u32)(x))
#endif
45
46
/* devfs entry for our device node (2.4-era devfs support). */
static devfs_handle_t devfs_handle;

/* One host_info per registered 1394 host adapter. */
static LIST_HEAD(host_info_list);
static int host_count;         /* number of entries in host_info_list */
/* Protects host_info_list/host_count and each host's file_info_list. */
static spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;
/* Bumped on every host add and bus reset; clients compare against it to
 * detect a stale view of the card list. */
static atomic_t internal_generation = ATOMIC_INIT(0);

/* Our registration handle with the ieee1394 highlevel layer. */
static struct hpsb_highlevel *hl_handle;

/* Total bytes currently held in iso_block_store buffers, capped at
 * iso_buffer_max; packets beyond the budget are dropped. */
static atomic_t iso_buffer_size;
static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */

static void queue_complete_cb(struct pending_request *req);
60
61 static struct pending_request *__alloc_pending_request(int flags)
62 {
63         struct pending_request *req;
64
65         req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
66                                                 flags);
67         if (req != NULL) {
68                 memset(req, 0, sizeof(struct pending_request));
69                 INIT_LIST_HEAD(&req->list);
70                 INIT_TQUEUE(&req->tq, (void(*)(void*))queue_complete_cb, NULL);
71         }
72
73         return req;
74 }
75
/* Allocate a pending_request in process context (may sleep). */
static inline struct pending_request *alloc_pending_request(void)
{
        return __alloc_pending_request(SLAB_KERNEL);
}
80
/*
 * Release a pending_request together with whatever data buffer it owns:
 * either a refcounted iso_block_store shared with other requests (freed
 * and debited from the global budget when the last reference drops) or a
 * kmalloc'ed buffer flagged via req->free_data.  Also frees the attached
 * hpsb packet — NOTE(review): assumes free_hpsb_packet() tolerates a NULL
 * packet, since not every request carries one; confirm in ieee1394_core.
 */
static void free_pending_request(struct pending_request *req)
{
        if (req->ibs) {
                if (atomic_dec_and_test(&req->ibs->refcount)) {
                        atomic_sub(req->ibs->data_size, &iso_buffer_size);
                        kfree(req->ibs);
                }
        } else if (req->free_data) {
                kfree(req->data);
        }
        free_hpsb_packet(req->packet);
        kfree(req);
}
94
/*
 * Move a request from whatever list it is currently on (req_pending or a
 * caller-local list) to its file's completion list, then wake readers:
 * one up() per completed request pairs with the down() in raw1394_read(),
 * and pollers waiting on poll_wait_complete are notified.
 */
static void queue_complete_req(struct pending_request *req)
{
        unsigned long flags;
        struct file_info *fi = req->file_info;

        spin_lock_irqsave(&fi->reqlists_lock, flags);
        list_del(&req->list);
        list_add_tail(&req->list, &fi->req_complete);
        spin_unlock_irqrestore(&fi->reqlists_lock, flags);

        up(&fi->complete_sem);
        wake_up_interruptible(&fi->poll_wait_complete);
}
108
/*
 * Packet completion callback, run from the packet's completion task
 * queue.  Translates the hpsb ack code into a raw1394 error value,
 * discards the payload length unless the transaction fully succeeded
 * (ack_pending + rcode_complete), releases the transaction label and
 * hands the request to userspace via the completion list.
 */
static void queue_complete_cb(struct pending_request *req)
{
        struct hpsb_packet *packet = req->packet;
        /* The response rcode sits in bits 15:12 of header quadlet 1. */
        int rcode = (packet->header[1] >> 12) & 0xf;

        switch (packet->ack_code) {
        case ACKX_NONE:
        case ACKX_SEND_ERROR:
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                break;
        case ACKX_ABORTED:
                req->req.error = RAW1394_ERROR_ABORTED;
                break;
        case ACKX_TIMEOUT:
                req->req.error = RAW1394_ERROR_TIMEOUT;
                break;
        default:
                /* Pack raw ack code and rcode into one error word. */
                req->req.error = (packet->ack_code << 16) | rcode;
                break;
        }

        if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
                req->req.length = 0;
        }

        free_tlabel(packet->host, packet->node_id, packet->tlabel);

        queue_complete_req(req);
}
138
139
/*
 * Highlevel callback: a new 1394 host adapter was registered.  Track it
 * on host_info_list so clients can connect to it.
 * NOTE(review): internal_generation is bumped even when the allocation
 * fails — presumably deliberate best-effort (clients re-list cards and
 * simply never see the lost host); confirm before "fixing".
 */
static void add_host(struct hpsb_host *host)
{
        struct host_info *hi;

        hi = (struct host_info *)kmalloc(sizeof(struct host_info), SLAB_KERNEL);
        if (hi != NULL) {
                INIT_LIST_HEAD(&hi->list);
                hi->host = host;
                INIT_LIST_HEAD(&hi->file_info_list);

                spin_lock_irq(&host_info_lock);
                list_add_tail(&hi->list, &host_info_list);
                host_count++;
                spin_unlock_irq(&host_info_lock);
        }

        atomic_inc(&internal_generation);
}
158
159
160 static struct host_info *find_host_info(struct hpsb_host *host)
161 {
162         struct list_head *lh;
163         struct host_info *hi;
164
165         list_for_each(lh, &host_info_list) {
166                 hi = list_entry(lh, struct host_info, list);
167                 if (hi->host == host) {
168                         return hi;
169                 }
170         }
171
172         return NULL;
173 }
174
175 static void remove_host(struct hpsb_host *host)
176 {
177         struct host_info *hi;
178
179         spin_lock_irq(&host_info_lock);
180         hi = find_host_info(host);
181
182         if (hi != NULL) {
183                 list_del(&hi->list);
184                 host_count--;
185         }
186         spin_unlock_irq(&host_info_lock);
187
188         if (hi == NULL) {
189                 printk(KERN_ERR "raw1394: attempt to remove unknown host "
190                        "0x%p\n", host);
191                 return;
192         }
193
194         kfree(hi);
195 }
196
/*
 * Highlevel callback: a bus reset happened on this host.  Queue one
 * RAW1394_REQ_BUS_RESET event per attached client, carrying the new
 * generation plus node id/count (and the IRM id for protocol version >3
 * clients).  May run in atomic context, hence SLAB_ATOMIC; on allocation
 * failure the event is silently dropped for that client.
 */
static void host_reset(struct hpsb_host *host)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);

        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        req = __alloc_pending_request(SLAB_ATOMIC);

                        if (req != NULL) {
                                req->file_info = fi;
                                req->req.type = RAW1394_REQ_BUS_RESET;
                                req->req.generation = get_hpsb_generation(host);
                                /* misc: node id in the high half, node
                                 * count in the low half. */
                                req->req.misc = (host->node_id << 16)
                                        | host->node_count;
                                if (fi->protocol_version > 3) {
                                        /* Newer API also reports the iso
                                         * resource manager's node number. */
                                        req->req.misc |= ((host->irm_id
                                                           & NODE_MASK) << 8);
                                }

                                queue_complete_req(req);
                        }
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);

        atomic_inc(&internal_generation);
}
232
233 static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
234                         unsigned int length)
235 {
236         unsigned long flags;
237         struct list_head *lh;
238         struct host_info *hi;
239         struct file_info *fi;
240         struct pending_request *req;
241         struct iso_block_store *ibs = NULL;
242         LIST_HEAD(reqs);
243
244         if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
245                 HPSB_INFO("dropped iso packet");
246                 return;
247         }
248
249         spin_lock_irqsave(&host_info_lock, flags);
250         hi = find_host_info(host);
251
252         if (hi != NULL) {
253                 list_for_each(lh, &hi->file_info_list) {
254                         fi = list_entry(lh, struct file_info, list);
255
256                         if (!(fi->listen_channels & (1ULL << channel))) {
257                                 continue;
258                         }
259
260                         req = __alloc_pending_request(SLAB_ATOMIC);
261                         if (!req) break;
262
263                         if (!ibs) {
264                                 ibs = kmalloc(sizeof(struct iso_block_store)
265                                               + length, SLAB_ATOMIC);
266                                 if (!ibs) {
267                                         kfree(req);
268                                         break;
269                                 }
270
271                                 atomic_add(length, &iso_buffer_size);
272                                 atomic_set(&ibs->refcount, 0);
273                                 ibs->data_size = length;
274                                 memcpy(ibs->data, data, length);
275                         }
276
277                         atomic_inc(&ibs->refcount);
278
279                         req->file_info = fi;
280                         req->ibs = ibs;
281                         req->data = ibs->data;
282                         req->req.type = RAW1394_REQ_ISO_RECEIVE;
283                         req->req.generation = get_hpsb_generation(host);
284                         req->req.misc = 0;
285                         req->req.recvb = ptr2int(fi->iso_buffer);
286                         req->req.length = MIN(length, fi->iso_buffer_length);
287                         
288                         list_add_tail(&req->list, &reqs);
289                 }
290         }
291         spin_unlock_irqrestore(&host_info_lock, flags);
292
293         list_for_each(lh, &reqs) {
294                 req = list_entry(lh, struct pending_request, list);
295                 queue_complete_req(req);
296         }
297 }
298
299 static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
300                         int cts, u8 *data, unsigned int length)
301 {
302         unsigned long flags;
303         struct list_head *lh;
304         struct host_info *hi;
305         struct file_info *fi;
306         struct pending_request *req;
307         struct iso_block_store *ibs = NULL;
308         LIST_HEAD(reqs);
309
310         if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
311                 HPSB_INFO("dropped fcp request");
312                 return;
313         }
314
315         spin_lock_irqsave(&host_info_lock, flags);
316         hi = find_host_info(host);
317
318         if (hi != NULL) {
319                 list_for_each(lh, &hi->file_info_list) {
320                         fi = list_entry(lh, struct file_info, list);
321
322                         if (!fi->fcp_buffer) {
323                                 continue;
324                         }
325
326                         req = __alloc_pending_request(SLAB_ATOMIC);
327                         if (!req) break;
328
329                         if (!ibs) {
330                                 ibs = kmalloc(sizeof(struct iso_block_store)
331                                               + length, SLAB_ATOMIC);
332                                 if (!ibs) {
333                                         kfree(req);
334                                         break;
335                                 }
336
337                                 atomic_add(length, &iso_buffer_size);
338                                 atomic_set(&ibs->refcount, 0);
339                                 ibs->data_size = length;
340                                 memcpy(ibs->data, data, length);
341                         }
342
343                         atomic_inc(&ibs->refcount);
344
345                         req->file_info = fi;
346                         req->ibs = ibs;
347                         req->data = ibs->data;
348                         req->req.type = RAW1394_REQ_FCP_REQUEST;
349                         req->req.generation = get_hpsb_generation(host);
350                         req->req.misc = nodeid | (direction << 16);
351                         req->req.recvb = ptr2int(fi->fcp_buffer);
352                         req->req.length = length;
353                         
354                         list_add_tail(&req->list, &reqs);
355                 }
356         }
357         spin_unlock_irqrestore(&host_info_lock, flags);
358
359         list_for_each(lh, &reqs) {
360                 req = list_entry(lh, struct pending_request, list);
361                 queue_complete_req(req);
362         }
363 }
364
365
/*
 * read(): deliver exactly one completed raw1394_request to userspace,
 * blocking on complete_sem unless O_NONBLOCK is set.  Any payload goes to
 * the receive buffer the client named in the original request (recvb);
 * the request header itself is written to 'buffer'.
 */
static ssize_t raw1394_read(struct file *file, char *buffer, size_t count,
                    loff_t *offset_is_ignored)
{
        struct file_info *fi = (struct file_info *)file->private_data;
        struct list_head *lh;
        struct pending_request *req;

        /* Only whole-request reads are supported. */
        if (count != sizeof(struct raw1394_request)) {
                return -EINVAL;
        }

        if (!access_ok(VERIFY_WRITE, buffer, count)) {
                return -EFAULT;
        }

        if (file->f_flags & O_NONBLOCK) {
                if (down_trylock(&fi->complete_sem)) {
                        return -EAGAIN;
                }
        } else {
                if (down_interruptible(&fi->complete_sem)) {
                        return -ERESTARTSYS;
                }
        }

        /* Holding the semaphore guarantees req_complete is non-empty. */
        spin_lock_irq(&fi->reqlists_lock);
        lh = fi->req_complete.next;
        list_del(lh);
        spin_unlock_irq(&fi->reqlists_lock);

        req = list_entry(lh, struct pending_request, list);

        if (req->req.length) {
                /* A fault here is reported in the request header rather
                 * than failing the read itself. */
                if (copy_to_user(int2ptr(req->req.recvb), req->data,
                                 req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                }
        }
        /* access_ok() was verified above, hence the __ variant. */
        __copy_to_user(buffer, &req->req, sizeof(req->req));

        free_pending_request(req);
        return sizeof(struct raw1394_request);
}
409
410
411 static int state_opened(struct file_info *fi, struct pending_request *req)
412 {
413         if (req->req.type == RAW1394_REQ_INITIALIZE) {
414                 switch (req->req.misc) {
415                 case RAW1394_KERNELAPI_VERSION:
416                 case 3:
417                         fi->state = initialized;
418                         fi->protocol_version = req->req.misc;
419                         req->req.error = RAW1394_ERROR_NONE;
420                         req->req.generation = atomic_read(&internal_generation);
421                         break;
422
423                 default:
424                         req->req.error = RAW1394_ERROR_COMPAT;
425                         req->req.misc = RAW1394_KERNELAPI_VERSION;
426                 }
427         } else {
428                 req->req.error = RAW1394_ERROR_STATE_ORDER;
429         }
430
431         req->req.length = 0;
432         queue_complete_req(req);
433         return sizeof(struct raw1394_request);
434 }
435
/*
 * Handle requests from an initialized (but not yet connected) fd:
 * LIST_CARDS returns an array of raw1394_khost_list entries, SET_CARD
 * binds the fd to the Nth host and moves it to the 'connected' state.
 * A stale client generation is rejected up front with the current value
 * so the client can re-list.
 */
static int state_initialized(struct file_info *fi, struct pending_request *req)
{
        struct list_head *lh;
        struct host_info *hi;
        struct raw1394_khost_list *khl;

        if (req->req.generation != atomic_read(&internal_generation)) {
                req->req.error = RAW1394_ERROR_GENERATION;
                req->req.generation = atomic_read(&internal_generation);
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        switch (req->req.type) {
        case RAW1394_REQ_LIST_CARDS:
                /* SLAB_ATOMIC: allocated with host_info_lock held. */
                spin_lock_irq(&host_info_lock);
                khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
                              SLAB_ATOMIC);

                if (khl != NULL) {
                        req->req.misc = host_count;
                        req->data = (quadlet_t *)khl;

                        list_for_each(lh, &host_info_list) {
                                hi = list_entry(lh, struct host_info, list);

                                khl->nodes = hi->host->node_count;
                                strcpy(khl->name, hi->host->template->name);

                                khl++;
                        }
                }
                spin_unlock_irq(&host_info_lock);

                if (khl != NULL) {
                        req->req.error = RAW1394_ERROR_NONE;
                        /* Trim to what the client's buffer can take. */
                        req->req.length = MIN(req->req.length,
                                              sizeof(struct raw1394_khost_list)
                                              * req->req.misc);
                        /* khl is delivered and freed via req->data. */
                        req->free_data = 1;
                } else {
                        return -ENOMEM;
                }
                break;

        case RAW1394_REQ_SET_CARD:
                lh = NULL;

                spin_lock_irq(&host_info_lock);
                if (req->req.misc < host_count) {
                        /* Walk to the misc'th host (misc is consumed here
                         * and rewritten below on success). */
                        lh = host_info_list.next;
                        while (req->req.misc--) {
                                lh = lh->next;
                        }
                        hi = list_entry(lh, struct host_info, list);
                        hpsb_inc_host_usage(hi->host);
                        list_add_tail(&fi->list, &hi->file_info_list);
                        fi->host = hi->host;
                        fi->state = connected;
                }
                spin_unlock_irq(&host_info_lock);

                if (lh != NULL) {
                        req->req.error = RAW1394_ERROR_NONE;
                        req->req.generation = get_hpsb_generation(fi->host);
                        /* misc: node id in the high half, node count in
                         * the low half; version >3 also gets the IRM id. */
                        req->req.misc = (fi->host->node_id << 16)
                                | fi->host->node_count;
                        if (fi->protocol_version > 3) {
                                req->req.misc |=
                                        (fi->host->irm_id & NODE_MASK) << 8;
                        }
                } else {
                        req->req.error = RAW1394_ERROR_INVALID_ARG;
                }

                req->req.length = 0;
                break;

        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
                req->req.length = 0;
                break;
        }

        queue_complete_req(req);
        return sizeof(struct raw1394_request);
}
524
/*
 * Start or stop listening on an iso channel.  req.misc carries the
 * channel: 0..63 subscribes, one's-complement negatives (~channel)
 * unsubscribe.  The client's iso receive buffer (recvb/length) is
 * recorded for use by iso_receive().
 * NOTE(review): this takes host_info_lock without disabling interrupts
 * while iso_receive() uses irqsave on the same lock — confirm the core
 * never calls iso_receive() from IRQ context on this kernel, or this can
 * deadlock.
 */
static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
{
        int channel = req->req.misc;

        spin_lock(&host_info_lock);
        if ((channel > 63) || (channel < -64)) {
                req->req.error = RAW1394_ERROR_INVALID_ARG;
        } else if (channel >= 0) {
                /* allocate channel req.misc */
                if (fi->listen_channels & (1ULL << channel)) {
                        req->req.error = RAW1394_ERROR_ALREADY;
                } else {
                        fi->listen_channels |= 1ULL << channel;
                        hpsb_listen_channel(hl_handle, fi->host, channel);
                        fi->iso_buffer = int2ptr(req->req.recvb);
                        fi->iso_buffer_length = req->req.length;
                }
        } else {
                /* deallocate channel (one's complement neg) req.misc */
                channel = ~channel;

                if (fi->listen_channels & (1ULL << channel)) {
                        hpsb_unlisten_channel(hl_handle, fi->host, channel);
                        fi->listen_channels &= ~(1ULL << channel);
                } else {
                        req->req.error = RAW1394_ERROR_INVALID_ARG;
                }
        }

        req->req.length = 0;
        queue_complete_req(req);
        spin_unlock(&host_info_lock);
}
558
559 static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
560 {
561         if (req->req.misc) {
562                 if (fi->fcp_buffer) {
563                         req->req.error = RAW1394_ERROR_ALREADY;
564                 } else {
565                         fi->fcp_buffer = (u8 *)int2ptr(req->req.recvb);
566                 }
567         } else {
568                 if (!fi->fcp_buffer) {
569                         req->req.error = RAW1394_ERROR_ALREADY;
570                 } else {
571                         fi->fcp_buffer = NULL;
572                 }
573         }
574
575         req->req.length = 0;
576         queue_complete_req(req);
577 }
578
/*
 * Execute a read/write/lock request aimed at our own node by calling the
 * highlevel address-space handlers directly instead of putting a packet
 * on the wire.  Completes the request immediately.  Returns bytes
 * consumed from the write, or -ENOMEM.
 * NOTE(review): req.length comes from userspace and is handed to kmalloc
 * unbounded — oversized values just make the allocation fail.
 */
static int handle_local_request(struct file_info *fi,
                                struct pending_request *req, int node)
{
        /* Addresses are 48 bits; the top 16 bits carried the node id. */
        u64 addr = req->req.address & 0xffffffffffffULL;

        req->data = kmalloc(req->req.length, SLAB_KERNEL);
        if (!req->data) return -ENOMEM;
        req->free_data = 1;

        switch (req->req.type) {
        case RAW1394_REQ_ASYNC_READ:
                req->req.error = highlevel_read(fi->host, node, req->data, addr,
                                                req->req.length);
                break;

        case RAW1394_REQ_ASYNC_WRITE:
                if (copy_from_user(req->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }

                req->req.error = highlevel_write(fi->host, node, node, req->data,
                                                 addr, req->req.length);
                req->req.length = 0;
                break;

        case RAW1394_REQ_LOCK:
                /* fetch_add/little_add take one quadlet operand; all
                 * other extended tcodes take two (compare + update). */
                if ((req->req.misc == EXTCODE_FETCH_ADD)
                    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
                        if (req->req.length != 4) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                } else {
                        if (req->req.length != 8) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                }

                if (copy_from_user(req->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }

                if (req->req.length == 8) {
                        /* data[0] = arg, data[1] = value; the lock result
                         * is a single quadlet written back to data. */
                        req->req.error = highlevel_lock(fi->host, node,
                                                        req->data, addr,
                                                        req->data[1],
                                                        req->data[0],
                                                        req->req.misc);
                        req->req.length = 4;
                } else {
                        req->req.error = highlevel_lock(fi->host, node,
                                                        req->data, addr,
                                                        req->data[0], 0,
                                                        req->req.misc);
                }
                break;

        case RAW1394_REQ_LOCK64:
        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
        }

        /* Fake an ack_pending in the error word so a successful local
         * request looks like a completed remote transaction. */
        if (req->req.error)
                req->req.length = 0;
        if (req->req.error >= 0)
                req->req.error |= ACK_PENDING << 16;

        queue_complete_req(req);
        return sizeof(struct raw1394_request);
}
654
/*
 * Build and send an async packet for a read/write/lock request that
 * targets another node.  The request is parked on req_pending and is
 * completed asynchronously by queue_complete_cb() from the packet's
 * completion task queue.  Returns bytes consumed, or -ENOMEM when packet
 * allocation fails (the caller then frees req).
 */
static int handle_remote_request(struct file_info *fi,
                                 struct pending_request *req, int node)
{
        struct hpsb_packet *packet = NULL;
        u64 addr = req->req.address & 0xffffffffffffULL;

        switch (req->req.type) {
        case RAW1394_REQ_ASYNC_READ:
                if (req->req.length == 4) {
                        packet = hpsb_make_readqpacket(fi->host, node, addr);
                        if (!packet) return -ENOMEM;

                        /* Quadlet read data arrives in header quadlet 3. */
                        req->data = &packet->header[3];
                } else {
                        packet = hpsb_make_readbpacket(fi->host, node, addr,
                                                       req->req.length);
                        if (!packet) return -ENOMEM;

                        req->data = packet->data;
                }
                break;

        case RAW1394_REQ_ASYNC_WRITE:
                if (req->req.length == 4) {
                        quadlet_t x;

                        /* A fault is recorded in req.error; the packet is
                         * still built (and caught by the error check
                         * after the switch before it is sent). */
                        if (copy_from_user(&x, int2ptr(req->req.sendb), 4)) {
                                req->req.error = RAW1394_ERROR_MEMFAULT;
                        }

                        packet = hpsb_make_writeqpacket(fi->host, node, addr,
                                                        x);
                        if (!packet) return -ENOMEM;
                } else {
                        packet = hpsb_make_writebpacket(fi->host, node, addr,
                                                        req->req.length);
                        if (!packet) return -ENOMEM;

                        if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                                           req->req.length)) {
                                req->req.error = RAW1394_ERROR_MEMFAULT;
                        }
                }
                req->req.length = 0;
                break;

        case RAW1394_REQ_LOCK:
                /* fetch_add/little_add take one quadlet operand; all
                 * other extended tcodes take two (compare + update). */
                if ((req->req.misc == EXTCODE_FETCH_ADD)
                    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
                        if (req->req.length != 4) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                } else {
                        if (req->req.length != 8) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                }

                packet = hpsb_make_lockpacket(fi->host, node, addr,
                                              req->req.misc);
                if (!packet) return -ENOMEM;

                if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }

                req->data = packet->data;
                /* A lock response is always a single quadlet. */
                req->req.length = 4;
                break;

        case RAW1394_REQ_LOCK64:
        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
        }

        /* From here on the packet (possibly NULL) is owned by req and
         * released via free_pending_request(). */
        req->packet = packet;

        if (req->req.error) {
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        /* Completion runs queue_complete_cb(req) off the packet's task
         * queue once the transaction finishes. */
        req->tq.data = req;
        queue_task(&req->tq, &packet->complete_tq);

        spin_lock_irq(&fi->reqlists_lock);
        list_add_tail(&req->list, &fi->req_pending);
        spin_unlock_irq(&fi->reqlists_lock);

        if (!hpsb_send_packet(packet)) {
                /* Send failed: complete immediately with an error and
                 * give back the transaction label ourselves. */
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                req->req.length = 0;
                free_tlabel(packet->host, packet->node_id, packet->tlabel);
                queue_complete_req(req);
        }
        return sizeof(struct raw1394_request);
}
757
/*
 * Send an iso packet on the given channel.  req.misc packs the tag
 * (bits 17:16) and sync code (bits 3:0); the speed code rides in the low
 * bits of req.address.  The request completes (without payload) once the
 * packet's completion task queue fires.  Returns bytes consumed or
 * -ENOMEM.
 */
static int handle_iso_send(struct file_info *fi, struct pending_request *req,
                           int channel)
{
        struct hpsb_packet *packet;

        packet = alloc_hpsb_packet(req->req.length);
        if (!packet) return -ENOMEM;
        req->packet = packet;

        fill_iso_packet(packet, req->req.length, channel & 0x3f,
                        (req->req.misc >> 16) & 0x3, req->req.misc & 0xf);
        packet->type = iso;
        packet->speed_code = req->req.address & 0x3;
        packet->host = fi->host;

        if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                           req->req.length)) {
                req->req.error = RAW1394_ERROR_MEMFAULT;
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        /* Iso sends get no response packet, so complete directly via
         * queue_complete_req instead of queue_complete_cb. */
        req->tq.data = req;
        req->tq.routine = (void (*)(void*))queue_complete_req;
        req->req.length = 0;
        queue_task(&req->tq, &packet->complete_tq);

        spin_lock_irq(&fi->reqlists_lock);
        list_add_tail(&req->list, &fi->req_pending);
        spin_unlock_irq(&fi->reqlists_lock);

        if (!hpsb_send_packet(packet)) {
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                queue_complete_req(req);
        }

        return sizeof(struct raw1394_request);
}
797
798 static int state_connected(struct file_info *fi, struct pending_request *req)
799 {
800         int node = req->req.address >> 48;
801
802         req->req.error = RAW1394_ERROR_NONE;
803
804         if (req->req.type ==  RAW1394_REQ_ISO_SEND) {
805                 return handle_iso_send(fi, req, node);
806         }
807
808         if (req->req.generation != get_hpsb_generation(fi->host)) {
809                 req->req.error = RAW1394_ERROR_GENERATION;
810                 req->req.generation = get_hpsb_generation(fi->host);
811                 req->req.length = 0;
812                 queue_complete_req(req);
813                 return sizeof(struct raw1394_request);
814         }
815
816         switch (req->req.type) {
817         case RAW1394_REQ_ISO_LISTEN:
818                 handle_iso_listen(fi, req);
819                 return sizeof(struct raw1394_request);
820
821         case RAW1394_REQ_FCP_LISTEN:
822                 handle_fcp_listen(fi, req);
823                 return sizeof(struct raw1394_request);
824
825         case RAW1394_REQ_RESET_BUS:
826                 hpsb_reset_bus(fi->host, LONG_RESET);
827                 return sizeof(struct raw1394_request);
828         }
829
830         if (req->req.length == 0) {
831                 req->req.error = RAW1394_ERROR_INVALID_ARG;
832                 queue_complete_req(req);
833                 return sizeof(struct raw1394_request);
834         }
835
836         if (fi->host->node_id == node) {
837                 return handle_local_request(fi, req, node);
838         }
839
840         return handle_remote_request(fi, req, node);
841 }
842
843
844 static ssize_t raw1394_write(struct file *file, const char *buffer, size_t count,
845                      loff_t *offset_is_ignored)
846 {
847         struct file_info *fi = (struct file_info *)file->private_data;
848         struct pending_request *req;
849         ssize_t retval = 0;
850
851         if (count != sizeof(struct raw1394_request)) {
852                 return -EINVAL;
853         }
854
855         req = alloc_pending_request();
856         if (req == NULL) {
857                 return -ENOMEM;
858         }
859         req->file_info = fi;
860
861         if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
862                 free_pending_request(req);
863                 return -EFAULT;
864         }
865
866         switch (fi->state) {
867         case opened:
868                 retval = state_opened(fi, req);
869                 break;
870
871         case initialized:
872                 retval = state_initialized(fi, req);
873                 break;
874
875         case connected:
876                 retval = state_connected(fi, req);
877                 break;
878         }
879
880         if (retval < 0) {
881                 free_pending_request(req);
882         }
883
884         return retval;
885 }
886
887 static unsigned int raw1394_poll(struct file *file, poll_table *pt)
888 {
889         struct file_info *fi = file->private_data;
890         unsigned int mask = POLLOUT | POLLWRNORM;
891
892         poll_wait(file, &fi->poll_wait_complete, pt);
893
894         spin_lock_irq(&fi->reqlists_lock);
895         if (!list_empty(&fi->req_complete)) {
896                 mask |= POLLIN | POLLRDNORM;
897         }
898         spin_unlock_irq(&fi->reqlists_lock);
899
900         return mask;
901 }
902
903 static int raw1394_open(struct inode *inode, struct file *file)
904 {
905         struct file_info *fi;
906
907         if (MINOR(inode->i_rdev)) {
908                 return -ENXIO;
909         }
910
911         V22_COMPAT_MOD_INC_USE_COUNT;
912
913         fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
914         if (fi == NULL) {
915                 V22_COMPAT_MOD_DEC_USE_COUNT;
916                 return -ENOMEM;
917         }
918         
919         memset(fi, 0, sizeof(struct file_info));
920
921         INIT_LIST_HEAD(&fi->list);
922         fi->state = opened;
923         INIT_LIST_HEAD(&fi->req_pending);
924         INIT_LIST_HEAD(&fi->req_complete);
925         sema_init(&fi->complete_sem, 0);
926         spin_lock_init(&fi->reqlists_lock);
927         init_waitqueue_head(&fi->poll_wait_complete);
928
929         file->private_data = fi;
930
931         return 0;
932 }
933
934 static int raw1394_release(struct inode *inode, struct file *file)
935 {
936         struct file_info *fi = file->private_data;
937         struct list_head *lh;
938         struct pending_request *req;
939         int done = 0, i;
940
941         lock_kernel();
942         for (i = 0; i < 64; i++) {
943                 if (fi->listen_channels & (1ULL << i)) {
944                         hpsb_unlisten_channel(hl_handle, fi->host, i);
945                 }
946         }
947
948         spin_lock(&host_info_lock);
949         fi->listen_channels = 0;
950         spin_unlock(&host_info_lock);
951
952         while (!done) {
953                 spin_lock_irq(&fi->reqlists_lock);
954
955                 while (!list_empty(&fi->req_complete)) {
956                         lh = fi->req_complete.next;
957                         list_del(lh);
958
959                         req = list_entry(lh, struct pending_request, list);
960
961                         free_pending_request(req);
962                 }
963
964                 if (list_empty(&fi->req_pending)) done = 1;
965
966                 spin_unlock_irq(&fi->reqlists_lock);
967
968                 if (!done) down_interruptible(&fi->complete_sem);
969         }
970
971         if (fi->state == connected) {
972                 spin_lock_irq(&host_info_lock);
973                 list_del(&fi->list);
974                 spin_unlock_irq(&host_info_lock);
975
976                 hpsb_dec_host_usage(fi->host);
977         }
978
979         kfree(fi);
980
981         V22_COMPAT_MOD_DEC_USE_COUNT;
982         unlock_kernel();
983         return 0;
984 }
985
/* Callbacks registered with the ieee1394 highlevel layer; invoked by
 * the core on host add/remove, bus reset, iso reception and FCP
 * requests. */
static struct hpsb_highlevel_ops hl_ops = {
        add_host:     add_host,
        remove_host:  remove_host,
        host_reset:   host_reset,
        iso_receive:  iso_receive,
        fcp_request:  fcp_request,
};
993
/* Character device operations for /dev/raw1394. */
static struct file_operations file_ops = {
        OWNER_THIS_MODULE
        read:     raw1394_read, 
        write:    raw1394_write, 
        poll:     raw1394_poll, 
        open:     raw1394_open, 
        release:  raw1394_release, 
};
1002
1003 static int __init init_raw1394(void)
1004 {
1005         hl_handle = hpsb_register_highlevel(RAW1394_DEVICE_NAME, &hl_ops);
1006         if (hl_handle == NULL) {
1007                 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
1008                 return -ENOMEM;
1009         }
1010
1011         devfs_handle = devfs_register(NULL, RAW1394_DEVICE_NAME, DEVFS_FL_NONE,
1012                                       RAW1394_DEVICE_MAJOR, 0,
1013                                       S_IFCHR | S_IRUSR | S_IWUSR, &file_ops,
1014                                       NULL);
1015
1016         if (devfs_register_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME, 
1017                                   &file_ops)) {
1018                 HPSB_ERR("raw1394 failed to register /dev/raw1394 device");
1019                 return -EBUSY;
1020         }
1021         printk(KERN_INFO "raw1394: /dev/%s device initialized\n", RAW1394_DEVICE_NAME);
1022         return 0;
1023 }
1024
/* Module exit: tear down in reverse order of init_raw1394() --
 * character device first, then the devfs entry, then the highlevel
 * registration. */
static void __exit cleanup_raw1394(void)
{
        devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME);
        devfs_unregister(devfs_handle);
        hpsb_unregister_highlevel(hl_handle);
}
1031
/* Hook the init/exit routines into the module loader. */
module_init(init_raw1394);
module_exit(cleanup_raw1394);