v2.4.8 -> v2.4.8.1
[opensuse:kernel.git] / drivers / ieee1394 / ieee1394_core.c
1 /*
2  * IEEE 1394 for Linux
3  *
4  * Core support: hpsb_packet management, packet handling and forwarding to
5  *               highlevel or lowlevel code
6  *
7  * Copyright (C) 1999, 2000 Andreas E. Bombe
8  *
9  * This code is licensed under the GPL.  See the file COPYING in the root
10  * directory of the kernel sources for details.
11  */
12
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/string.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <asm/bitops.h>
21 #include <asm/byteorder.h>
22 #include <asm/semaphore.h>
23
24 #include "ieee1394_types.h"
25 #include "ieee1394.h"
26 #include "hosts.h"
27 #include "ieee1394_core.h"
28 #include "highlevel.h"
29 #include "ieee1394_transactions.h"
30 #include "csr.h"
31 #include "nodemgr.h"
32
33 static kmem_cache_t *hpsb_packet_cache;
34
35
36 static void dump_packet(const char *text, quadlet_t *data, int size)
37 {
38         int i;
39
40         size /= 4;
41         size = (size > 4 ? 4 : size);
42
43         printk(KERN_DEBUG "ieee1394: %s", text);
44         for (i = 0; i < size; i++) {
45                 printk(" %8.8x", data[i]);
46         }
47         printk("\n");
48 }
49
50
51 /**
52  * alloc_hpsb_packet - allocate new packet structure
53  * @data_size: size of the data block to be allocated
54  *
55  * This function allocates, initializes and returns a new &struct hpsb_packet.
56  * It can be used in interrupt context.  A header block is always included, its
57  * size is big enough to contain all possible 1394 headers.  The data block is
58  * only allocated when @data_size is not zero.
59  *
60  * For packets for which responses will be received the @data_size has to be big
61  * enough to contain the response's data block since no further allocation
62  * occurs at response matching time.
63  *
64  * The packet's generation value is initialized to -1, which hpsb_send_packet()
65  * always rejects.  Set it to the bus generation you recorded earlier, or to the
66  * current value from get_hpsb_generation(), before sending the packet.
67  *
68  * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
69  * failure.
70  */
71 struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
72 {
73         struct hpsb_packet *packet = NULL;
74         void *data = NULL;
75         int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
76
77         packet = kmem_cache_alloc(hpsb_packet_cache, kmflags);
78         if (packet == NULL)
79                 return NULL;
80
81         memset(packet, 0, sizeof(struct hpsb_packet));
82         packet->header = packet->embedded_header;
83
84         if (data_size) {
85                 data = kmalloc(data_size + 8, kmflags);
86                 if (data == NULL) {
87                         kmem_cache_free(hpsb_packet_cache, packet);
88                         return NULL;
89                 }
90
91                 packet->data = data;
92                 packet->data_size = data_size;
93         }
94
95         INIT_TQ_HEAD(packet->complete_tq);
96         INIT_LIST_HEAD(&packet->list);
97         sema_init(&packet->state_change, 0);
98         packet->state = unused;
99         packet->generation = -1;
100         packet->data_be = 1;
101
102         return packet;
103 }
104
105
106 /**
107  * free_hpsb_packet - free packet and data associated with it
108  * @packet: packet to free (is NULL safe)
109  *
110  * This function frees the packet's data block (if any) and the packet itself.
111  * The header is embedded in the packet and needs no separate freeing.
112  */
113 void free_hpsb_packet(struct hpsb_packet *packet)
114 {
115         if (!packet) return;
116
117         kfree(packet->data);
118         kmem_cache_free(hpsb_packet_cache, packet);
119 }
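/*
 * Illustrative usage sketch (not part of the driver): allocate a packet whose
 * data block is large enough to hold the expected response, set the bus
 * generation, and release everything with free_hpsb_packet() when done.
 * resp_size is a caller-chosen placeholder.
 *
 *	struct hpsb_packet *p = alloc_hpsb_packet(resp_size);
 *	if (p == NULL)
 *		return -ENOMEM;
 *	p->host = host;
 *	p->generation = get_hpsb_generation(host);
 *	... fill the header and tcode for the desired transaction ...
 *	free_hpsb_packet(p);
 */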
120
121
122 int hpsb_reset_bus(struct hpsb_host *host, int type)
123 {
124         if (!host->initialized) {
125                 return 1;
126         }
127
128         if (!host->in_bus_reset) {
129                 host->template->devctl(host, RESET_BUS, type);
130                 return 0;
131         } else {
132                 return 1;
133         }
134 }
135
136
137 int hpsb_bus_reset(struct hpsb_host *host)
138 {
139         if (host->in_bus_reset) {
140                 HPSB_NOTICE("%s called while bus reset already in progress",
141                             __FUNCTION__);
142                 return 1;
143         }
144
145         abort_requests(host);
146         host->in_bus_reset = 1;
147         host->irm_id = -1;
148         host->busmgr_id = -1;
149         host->node_count = 0;
150         host->selfid_count = 0;
151
152         return 0;
153 }
154
155
156 /*
157  * Verify num_of_selfids SelfIDs and return number of nodes.  Return zero in
158  * case verification failed.
159  */
160 static int check_selfids(struct hpsb_host *host, unsigned int num_of_selfids)
161 {
162         int nodeid = -1;
163         int rest_of_selfids = num_of_selfids;
164         struct selfid *sid = (struct selfid *)host->topology_map;
165         struct ext_selfid *esid;
166         int esid_seq = 23;
167
168         while (rest_of_selfids--) {
169                 if (!sid->extended) {
170                         nodeid++;
171                         esid_seq = 0;
172                         
173                         if (sid->phy_id != nodeid) {
174                                 HPSB_INFO("SelfIDs failed monotonicity check "
175                                           "with %d", sid->phy_id);
176                                 return 0;
177                         }
178                         
179                         if (sid->contender && sid->link_active) {
180                                 host->irm_id = LOCAL_BUS | sid->phy_id;
181                         }
182                 } else {
183                         esid = (struct ext_selfid *)sid;
184
185                         if ((esid->phy_id != nodeid) 
186                             || (esid->seq_nr != esid_seq)) {
187                                 HPSB_INFO("SelfIDs failed monotonicity check "
188                                           "with %d/%d", esid->phy_id, esid->seq_nr);
189                                 return 0;
190                         }
191                         esid_seq++;
192                 }
193                 sid++;
194         }
195         
196         esid = (struct ext_selfid *)(sid - 1);
197         while (esid->extended) {
198                 if ((esid->porta == 0x2) || (esid->portb == 0x2)
199                     || (esid->portc == 0x2) || (esid->portd == 0x2)
200                     || (esid->porte == 0x2) || (esid->portf == 0x2)
201                     || (esid->portg == 0x2) || (esid->porth == 0x2)) {
202                         HPSB_INFO("SelfIDs failed root check on "
203                                   "extended SelfID");
204                         return 0;
205                 }
206                 esid--;
207         }
208
209         sid = (struct selfid *)esid;
210         if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
211                 HPSB_INFO("SelfIDs failed root check");
212                 return 0;
213         }
214
215         return nodeid + 1;
216 }
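/*
 * Example of a SelfID sequence that passes the checks above (purely
 * illustrative): SelfID(phy_id 0), SelfID(phy_id 1),
 * ext SelfID(phy_id 1, seq_nr 0), ext SelfID(phy_id 1, seq_nr 1),
 * SelfID(phy_id 2).  Plain SelfIDs must number the nodes 0, 1, 2, ... and any
 * extended SelfIDs must follow their node with consecutive sequence numbers.
 */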
217
218 static void build_speed_map(struct hpsb_host *host, int nodecount)
219 {
220         char speedcap[nodecount];
221         char cldcnt[nodecount];
222         u8 *map = host->speed_map;
223         struct selfid *sid;
224         struct ext_selfid *esid;
225         int i, j, n;
226
227         for (i = 0; i < (nodecount * 64); i += 64) {
228                 for (j = 0; j < nodecount; j++) {
229                         map[i+j] = SPEED_400;
230                 }
231         }
232
233         for (i = 0; i < nodecount; i++) {
234                 cldcnt[i] = 0;
235         }
236
237         /* find direct children count and speed */
238         for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
239                      n = nodecount - 1;
240              (void *)sid >= (void *)host->topology_map; sid--) {
241                 if (sid->extended) {
242                         esid = (struct ext_selfid *)sid;
243
244                         if (esid->porta == 0x3) cldcnt[n]++;
245                         if (esid->portb == 0x3) cldcnt[n]++;
246                         if (esid->portc == 0x3) cldcnt[n]++;
247                         if (esid->portd == 0x3) cldcnt[n]++;
248                         if (esid->porte == 0x3) cldcnt[n]++;
249                         if (esid->portf == 0x3) cldcnt[n]++;
250                         if (esid->portg == 0x3) cldcnt[n]++;
251                         if (esid->porth == 0x3) cldcnt[n]++;
252                 } else {
253                         if (sid->port0 == 0x3) cldcnt[n]++;
254                         if (sid->port1 == 0x3) cldcnt[n]++;
255                         if (sid->port2 == 0x3) cldcnt[n]++;
256
257                         speedcap[n] = sid->speed;
258                         n--;
259                 }
260         }
261
262         /* set self mapping */
263         for (i = 0; i < nodecount; i++) {
264                 map[64*i + i] = speedcap[i];
265         }
266
267         /* fix up direct children count to total children count;
268          * also fix up speedcaps for sibling and parent communication */
269         for (i = 1; i < nodecount; i++) {
270                 for (j = cldcnt[i], n = i - 1; j > 0; j--) {
271                         cldcnt[i] += cldcnt[n];
272                         speedcap[n] = MIN(speedcap[n], speedcap[i]);
273                         n -= cldcnt[n] + 1;
274                 }
275         }
276
277         for (n = 0; n < nodecount; n++) {
278                 for (i = n - cldcnt[n]; i <= n; i++) {
279                         for (j = 0; j < (n - cldcnt[n]); j++) {
280                                 map[j*64 + i] = map[i*64 + j] =
281                                         MIN(map[i*64 + j], speedcap[n]);
282                         }
283                         for (j = n + 1; j < nodecount; j++) {
284                                 map[j*64 + i] = map[i*64 + j] =
285                                         MIN(map[i*64 + j], speedcap[n]);
286                         }
287                 }
288         }
289 }
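/*
 * The speed map built above is a flat 64x64 byte matrix: the speed usable
 * between nodes a and b is host->speed_map[64*a + b] (stored symmetrically).
 * Illustrative lookup, not part of the driver, with dest_id standing in for
 * the target's node ID:
 *
 *	u8 speed = host->speed_map[64 * (host->node_id & NODE_MASK)
 *				   + (dest_id & NODE_MASK)];
 *
 * This is the same lookup hpsb_send_packet() performs for async,
 * non-broadcast packets.
 */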
290
291
292 void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
293 {
294         if (host->in_bus_reset) {
295 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
296                 HPSB_INFO("Including SelfID 0x%x", sid);
297 #endif
298                 host->topology_map[host->selfid_count++] = sid;
299         } else {
300                 HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
301                             sid, (host->node_id & BUS_MASK) >> 6);
302         }
303 }
304
305 void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
306 {
307         host->node_id = LOCAL_BUS | phyid;
308         host->in_bus_reset = 0;
309         host->is_root = isroot;
310
311         host->node_count = check_selfids(host, host->selfid_count);
312         if (!host->node_count) {
313                 if (host->reset_retries++ < 20) {
314                         /* selfid stage did not complete without error */
315                         HPSB_NOTICE("Error in SelfID stage, resetting");
316                         hpsb_reset_bus(host, LONG_RESET);
317                         return;
318                 } else {
319                         HPSB_NOTICE("Stopping out-of-control reset loop");
320                         HPSB_NOTICE("Warning - topology map and speed map will not be valid");
321                 }
322         } else {
323                 build_speed_map(host, host->node_count);
324         }
325
326         /* irm_id is kept up to date by check_selfids() */
327         if (host->irm_id == host->node_id) {
328                 host->is_irm = 1;
329                 host->is_busmgr = 1;
330                 host->busmgr_id = host->node_id;
331                 host->csr.bus_manager_id = host->node_id;
332         }
333
334         host->reset_retries = 0;
335         atomic_inc(&host->generation);
336         if (isroot) host->template->devctl(host, ACT_CYCLE_MASTER, 1);
337         highlevel_host_reset(host);
338 }
339
340
341 void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, 
342                       int ackcode)
343 {
344         unsigned long flags;
345
346         packet->ack_code = ackcode;
347
348         if (packet->no_waiter) {
349                 /* must not have a tlabel allocated */
350                 free_hpsb_packet(packet);
351                 return;
352         }
353
354         if (ackcode != ACK_PENDING || !packet->expect_response) {
355                 packet->state = complete;
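                /* two up()s: a blocking sender is expected to down()
                 * state_change twice, once for the send and once for the
                 * completion, so release both counts when the transaction
                 * finishes here without a pending phase */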
356                 up(&packet->state_change);
357                 up(&packet->state_change);
358                 run_task_queue(&packet->complete_tq);
359                 return;
360         }
361
362         packet->state = pending;
363         packet->sendtime = jiffies;
364
365         spin_lock_irqsave(&host->pending_pkt_lock, flags);
366         list_add_tail(&packet->list, &host->pending_packets);
367         spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
368
369         up(&packet->state_change);
370         queue_task(&host->timeout_tq, &tq_timer);
371 }
372
373 /**
374  * hpsb_send_packet - transmit a packet on the bus
375  * @packet: packet to send
376  *
377  * The packet is sent through the host specified in the packet->host field.
378  * Before sending, the packet's transmit speed is automatically determined using
379  * the local speed map when it is an async, non-broadcast packet.
380  *
381  * Possibilities for failure are that host is either not initialized, in bus
382  * reset, the packet's generation number doesn't match the current generation
383  * number or the host reports a transmit error.
384  *
385  * Return value: False (0) on failure, true (1) otherwise.
386  */
387 int hpsb_send_packet(struct hpsb_packet *packet)
388 {
389         struct hpsb_host *host = packet->host;
390
391         if (!host->initialized || host->in_bus_reset 
392             || (packet->generation != get_hpsb_generation(host))) {
393                 return 0;
394         }
395
396         packet->state = queued;
397
398         if (packet->type == async && packet->node_id != ALL_NODES) {
399                 packet->speed_code =
400                         host->speed_map[(host->node_id & NODE_MASK) * 64
401                                        + (packet->node_id & NODE_MASK)];
402         }
403
404 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
405         switch (packet->speed_code) {
406         case 2:
407                 dump_packet("send packet 400:", packet->header,
408                             packet->header_size);
409                 break;
410         case 1:
411                 dump_packet("send packet 200:", packet->header,
412                             packet->header_size);
413                 break;
414         default:
415                 dump_packet("send packet 100:", packet->header,
416                             packet->header_size);
417         }
418 #endif
419
420         return host->template->transmit_packet(host, packet);
421 }
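/*
 * Illustrative send-and-wait sketch (an assumed caller pattern, not part of
 * this file): after a successful hpsb_send_packet() the state_change
 * semaphore is up()ed once when the packet has been sent (hpsb_packet_sent)
 * and once more when the transaction completes, so a blocking caller can do:
 *
 *	if (!hpsb_send_packet(p))
 *		goto failed;
 *	down(&p->state_change);		wait until the packet was sent
 *	down(&p->state_change);		wait until the transaction completed
 *	... inspect p->ack_code and, for responses, p->header / p->data ...
 */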
422
423 static void send_packet_nocare(struct hpsb_packet *packet)
424 {
425         if (!hpsb_send_packet(packet)) {
426                 free_hpsb_packet(packet);
427         }
428 }
429
430
431 void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data,
432                             size_t size)
433 {
434         struct hpsb_packet *packet = NULL;
435         struct list_head *lh;
436         int tcode_match = 0;
437         int tlabel;
438         unsigned long flags;
439
440         tlabel = (data[0] >> 10) & 0x3f;
441
442         spin_lock_irqsave(&host->pending_pkt_lock, flags);
443
444         list_for_each(lh, &host->pending_packets) {
445                 packet = list_entry(lh, struct hpsb_packet, list);
446                 if ((packet->tlabel == tlabel)
447                     && (packet->node_id == (data[1] >> 16))){
448                         break;
449                 }
450         }
451
452         if (lh == &host->pending_packets) {
453                 HPSB_DEBUG("unsolicited response packet received - no pending request");
454                 dump_packet("contents:", data, 16);
455                 spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
456                 return;
457         }
458
459         switch (packet->tcode) {
460         case TCODE_WRITEQ:
461         case TCODE_WRITEB:
462                 if (tcode == TCODE_WRITE_RESPONSE) tcode_match = 1;
463                 break;
464         case TCODE_READQ:
465                 if (tcode == TCODE_READQ_RESPONSE) tcode_match = 1;
466                 break;
467         case TCODE_READB:
468                 if (tcode == TCODE_READB_RESPONSE) tcode_match = 1;
469                 break;
470         case TCODE_LOCK_REQUEST:
471                 if (tcode == TCODE_LOCK_RESPONSE) tcode_match = 1;
472                 break;
473         }
474
475         if (!tcode_match || (packet->tlabel != tlabel)
476             || (packet->node_id != (data[1] >> 16))) {
477                 HPSB_INFO("unsolicited response packet received");
478                 dump_packet("contents:", data, 16);
479
480                 spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
481                 return;
482         }
483
484         list_del(&packet->list);
485
486         spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
487
488         /* FIXME - update size fields? */
489         switch (tcode) {
490         case TCODE_WRITE_RESPONSE:
491                 memcpy(packet->header, data, 12);
492                 break;
493         case TCODE_READQ_RESPONSE:
494                 memcpy(packet->header, data, 16);
495                 break;
496         case TCODE_READB_RESPONSE:
497                 memcpy(packet->header, data, 16);
498                 memcpy(packet->data, data + 4, size - 16);
499                 break;
500         case TCODE_LOCK_RESPONSE:
501                 memcpy(packet->header, data, 16);
502                 memcpy(packet->data, data + 4, (size - 16) > 8 ? 8 : size - 16);
503                 break;
504         }
505
506         packet->state = complete;
507         up(&packet->state_change);
508         run_task_queue(&packet->complete_tq);
509 }
510
511
512 struct hpsb_packet *create_reply_packet(struct hpsb_host *host, quadlet_t *data,
513                                         size_t dsize)
514 {
515         struct hpsb_packet *p;
516
517         p = alloc_hpsb_packet(dsize + (dsize % 4 ? 4 - (dsize % 4) : 0));
520         if (p == NULL) {
521                 /* FIXME - send data_error response */
522                 return NULL;
523         }
524
525         p->type = async;
526         p->state = unused;
527         p->host = host;
528         p->node_id = data[1] >> 16;
529         p->tlabel = (data[0] >> 10) & 0x3f;
530         p->no_waiter = 1;
531
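        /* the data block allocation was rounded up to a full quadlet above;
         * zero the padding quadlet so stale heap contents never reach the
         * wire */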
532         if (dsize % 4) {
533                 p->data[dsize / 4] = 0;
534         }
535
536         return p;
537 }
538
539 #define PREP_REPLY_PACKET(length) \
540                 packet = create_reply_packet(host, data, length); \
541                 if (packet == NULL) break
542
543 void handle_incoming_packet(struct hpsb_host *host, int tcode, quadlet_t *data,
544                             size_t size, int write_acked)
545 {
546         struct hpsb_packet *packet;
547         int length, rcode, extcode;
548         nodeid_t source = data[1] >> 16;
549         nodeid_t dest = data[0] >> 16;
550         u64 addr;
551
552         /* big FIXME - no error checking is done for an out of bounds length */
553
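        /* Asynchronous request header layout: data[0] carries the destination
         * ID, tlabel and tcode; data[1] the source ID and the upper 16 bits of
         * the 48-bit destination offset; data[2] the lower 32 offset bits; and
         * data[3] (block forms only) the data_length and extended tcode. */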
554         switch (tcode) {
555         case TCODE_WRITEQ:
556                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
557                 rcode = highlevel_write(host, source, dest, data+3,
558                                         addr, 4);
559
560                 if (!write_acked
561                     && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
562                         /* not a broadcast write, reply */
563                         PREP_REPLY_PACKET(0);
564                         fill_async_write_resp(packet, rcode);
565                         send_packet_nocare(packet);
566                 }
567                 break;
568
569         case TCODE_WRITEB:
570                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
571                 rcode = highlevel_write(host, source, dest, data+4,
572                                         addr, data[3]>>16);
573
574                 if (!write_acked
575                     && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
576                         /* not a broadcast write, reply */
577                         PREP_REPLY_PACKET(0);
578                         fill_async_write_resp(packet, rcode);
579                         send_packet_nocare(packet);
580                 }
581                 break;
582
583         case TCODE_READQ:
584                 PREP_REPLY_PACKET(0);
585
586                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
587                 rcode = highlevel_read(host, source, data, addr, 4);
588                 fill_async_readquad_resp(packet, rcode, *data);
589                 send_packet_nocare(packet);
590                 break;
591
592         case TCODE_READB:
593                 length = data[3] >> 16;
594                 PREP_REPLY_PACKET(length);
595
596                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
597                 rcode = highlevel_read(host, source, packet->data, addr,
598                                        length);
599                 fill_async_readblock_resp(packet, rcode, length);
600                 send_packet_nocare(packet);
601                 break;
602
603         case TCODE_LOCK_REQUEST:
604                 length = data[3] >> 16;
605                 extcode = data[3] & 0xffff;
606                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
607
608                 PREP_REPLY_PACKET(8);
609
610                 if ((extcode == 0) || (extcode >= 7)) {
611                         /* let switch default handle error */
612                         length = 0;
613                 }
614
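                /* Lock payload sizes: 4 bytes = one 32-bit operand
                 * (fetch_add/little_add), 8 bytes = either two 32-bit operands
                 * (e.g. compare_swap) or one 64-bit operand for the add
                 * extcodes, 16 bytes = two 64-bit operands. */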
615                 switch (length) {
616                 case 4:
617                         rcode = highlevel_lock(host, source, packet->data, addr,
618                                                data[4], 0, extcode);
619                         fill_async_lock_resp(packet, rcode, extcode, 4);
620                         break;
621                 case 8:
622                         if ((extcode != EXTCODE_FETCH_ADD) 
623                             && (extcode != EXTCODE_LITTLE_ADD)) {
624                                 rcode = highlevel_lock(host, source,
625                                                        packet->data, addr,
626                                                        data[5], data[4], 
627                                                        extcode);
628                                 fill_async_lock_resp(packet, rcode, extcode, 4);
629                         } else {
630                                 rcode = highlevel_lock64(host, source,
631                                              (octlet_t *)packet->data, addr,
632                                              *(octlet_t *)(data + 4), 0ULL,
633                                              extcode);
634                                 fill_async_lock_resp(packet, rcode, extcode, 8);
635                         }
636                         break;
637                 case 16:
638                         rcode = highlevel_lock64(host, source,
639                                                  (octlet_t *)packet->data, addr,
640                                                  *(octlet_t *)(data + 6),
641                                                  *(octlet_t *)(data + 4), 
642                                                  extcode);
643                         fill_async_lock_resp(packet, rcode, extcode, 8);
644                         break;
645                 default:
646                         fill_async_lock_resp(packet, RCODE_TYPE_ERROR,
647                                              extcode, 0);
648                 }
649
650                 send_packet_nocare(packet);
651                 break;
652         }
653
654 }
655 #undef PREP_REPLY_PACKET
656
657
658 void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
659                           int write_acked)
660 {
661         int tcode;
662
663         if (host->in_bus_reset) {
664                 HPSB_INFO("received packet during reset; ignoring");
665                 return;
666         }
667
668 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
669         dump_packet("received packet:", data, size);
670 #endif
671
672         tcode = (data[0] >> 4) & 0xf;
673
674         switch (tcode) {
675         case TCODE_WRITE_RESPONSE:
676         case TCODE_READQ_RESPONSE:
677         case TCODE_READB_RESPONSE:
678         case TCODE_LOCK_RESPONSE:
679                 handle_packet_response(host, tcode, data, size);
680                 break;
681
682         case TCODE_WRITEQ:
683         case TCODE_WRITEB:
684         case TCODE_READQ:
685         case TCODE_READB:
686         case TCODE_LOCK_REQUEST:
687                 handle_incoming_packet(host, tcode, data, size, write_acked);
688                 break;
689
690
691         case TCODE_ISO_DATA:
692                 highlevel_iso_receive(host, data, size);
693                 break;
694
695         case TCODE_CYCLE_START:
696                 /* simply ignore this packet if it is passed on */
697                 break;
698
699         default:
700                 HPSB_NOTICE("received packet with bogus transaction code %d", 
701                             tcode);
702                 break;
703         }
704 }
705
706
707 void abort_requests(struct hpsb_host *host)
708 {
709         unsigned long flags;
710         struct hpsb_packet *packet;
711         struct list_head *lh;
712         LIST_HEAD(llist);
713
714         host->template->devctl(host, CANCEL_REQUESTS, 0);
715
716         spin_lock_irqsave(&host->pending_pkt_lock, flags);
717         list_splice(&host->pending_packets, &llist);
718         INIT_LIST_HEAD(&host->pending_packets);
719         spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
720
721         list_for_each(lh, &llist) {
722                 packet = list_entry(lh, struct hpsb_packet, list);
723                 packet->state = complete;
724                 packet->ack_code = ACKX_ABORTED;
725                 up(&packet->state_change);
726                 run_task_queue(&packet->complete_tq);
727         }
728 }
729
730 void abort_timedouts(struct hpsb_host *host)
731 {
732         unsigned long flags;
733         struct hpsb_packet *packet;
734         unsigned long expire;
735         struct list_head *lh;
736         LIST_HEAD(expiredlist);
737
738         spin_lock_irqsave(&host->csr.lock, flags);
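        /* Convert the split-timeout CSR into jiffies: split_timeout_hi counts
         * whole seconds, the top 13 bits of split_timeout_lo count 1/8000 s
         * (125 us) units, so the total in 1/8000 s units is scaled by
         * HZ/8000. */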
739         expire = (host->csr.split_timeout_hi * 8000 
740                   + (host->csr.split_timeout_lo >> 19))
741                 * HZ / 8000;
742         /* Avoid shortening of timeout due to rounding errors: */
743         expire++;
744         spin_unlock_irqrestore(&host->csr.lock, flags);
745
746
747         spin_lock_irqsave(&host->pending_pkt_lock, flags);
748
749         list_for_each(lh, &host->pending_packets) {
750                 packet = list_entry(lh, struct hpsb_packet, list);
751                 if (time_before(packet->sendtime + expire, jiffies)) {
752                         list_del(&packet->list);
753                         list_add(&packet->list, &expiredlist);
754                 }
755         }
756
757         if (!list_empty(&host->pending_packets)) {
758                 queue_task(&host->timeout_tq, &tq_timer);
759         }
760         spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
761
762         list_for_each(lh, &expiredlist) {
763                 packet = list_entry(lh, struct hpsb_packet, list);
764                 packet->state = complete;
765                 packet->ack_code = ACKX_TIMEOUT;
766                 up(&packet->state_change);
767                 run_task_queue(&packet->complete_tq);
768         }
769 }
770
771
772 static int __init ieee1394_init(void)
773 {
774         hpsb_packet_cache = kmem_cache_create("hpsb_packet", sizeof(struct hpsb_packet),
775                                               0, 0, NULL, NULL);
776         if (hpsb_packet_cache == NULL)
777                 return -ENOMEM;
776         init_hpsb_highlevel();
777         init_csr();
778         init_ieee1394_nodemgr();
779         return 0;
780 }
781
782 static void __exit ieee1394_cleanup(void)
783 {
784         cleanup_ieee1394_nodemgr();
785         cleanup_csr();
786         kmem_cache_destroy(hpsb_packet_cache);
787 }
788
789 module_init(ieee1394_init);
790 module_exit(ieee1394_cleanup);