/*
 *      drivers/i2o/i2o_lan.c
 *
 *      I2O LAN CLASS OSM               May 26th 2000
 *
 *      (C) Copyright 1999, 2000        University of Helsinki,
 *                                      Department of Computer Science
 *
 *      This code is still under development / test.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Authors:        Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
 *      Fixes:          Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *                      Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI>
 *                      Deepak Saxena <deepak@plexity.net>
 *
 *      Tested:         in FDDI environment (using SysKonnect's DDM)
 *                      in Gigabit Eth environment (using SysKonnect's DDM)
 *                      in Fast Ethernet environment (using Intel 82558 DDM)
 *
 *      TODO:           tests for other LAN classes (Token Ring, Fibre Channel)
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/pci.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/trdevice.h>
#include <linux/fcdevice.h>

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/tqueue.h>
#include <asm/io.h>

#include <linux/errno.h>

#include <linux/i2o.h>
#include "i2o_lan.h"

//#define DRIVERDEBUG
#ifdef DRIVERDEBUG
#define dprintk(s, args...) printk(s, ## args)
#else
#define dprintk(s, args...)
#endif

/* The following module parameters are used as default values
 * for per-interface values located in the net_device private area.
 * Private values are changed via the /proc filesystem.
 */
static u32 max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
static u32 bucket_thresh   = I2O_LAN_BUCKET_THRESH;
static u32 rx_copybreak    = I2O_LAN_RX_COPYBREAK;
static u8  tx_batch_mode   = I2O_LAN_TX_BATCH_MODE;
static u32 i2o_event_mask  = I2O_LAN_EVENT_MASK;

#define MAX_LAN_CARDS 16
static struct net_device *i2o_landevs[MAX_LAN_CARDS+1];
static int unit = -1;     /* device unit number */

static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_send_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static int i2o_lan_receive_post(struct net_device *dev);
static void i2o_lan_receive_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg);

static int i2o_lan_reset(struct net_device *dev);
static void i2o_lan_handle_event(struct net_device *dev, u32 *msg);

/* Structures to register handlers for the incoming replies. */

static struct i2o_handler i2o_lan_send_handler = {
        i2o_lan_send_post_reply,        // For send replies
        NULL,
        NULL,
        NULL,
        "I2O LAN OSM send",
        -1,
        I2O_CLASS_LAN
};
static int lan_send_context;

static struct i2o_handler i2o_lan_receive_handler = {
        i2o_lan_receive_post_reply,     // For receive replies
        NULL,
        NULL,
        NULL,
        "I2O LAN OSM receive",
        -1,
        I2O_CLASS_LAN
};
static int lan_receive_context;

static struct i2o_handler i2o_lan_handler = {
        i2o_lan_reply,                  // For other replies
        NULL,
        NULL,
        NULL,
        "I2O LAN OSM",
        -1,
        I2O_CLASS_LAN
};
static int lan_context;

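/* Bucket reposting is deferred to this task on the immediate queue:
 * the receive callback stores the net_device in .data and queues the
 * task, so new ReceivePost requests are built from a bottom half
 * rather than inside the reply handler itself.
 */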
DECLARE_TASK_QUEUE(i2o_post_buckets_task);
struct tq_struct run_i2o_post_buckets_task = {
        routine: (void (*)(void *)) run_task_queue,
        data: (void *) 0
};

/* Functions to handle message failures and transaction errors:
==============================================================*/

/*
 * i2o_lan_handle_failure(): Fail bit has been set since IOP's message
 * layer cannot deliver the request to the target, or the target cannot
 * process the request.
 */
static void i2o_lan_handle_failure(struct net_device *dev, u32 *msg)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;

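        /* msg[7] of the failure reply holds the offset (MFA) of the
         * preserved request frame; its SGL still references the skbs
         * handed to the DDM, and the same MFA is reposted as a NOP
         * below to return the frame to the IOP.
         */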
        u32 *preserved_msg = (u32*)(iop->mem_offset + msg[7]);
        u32 *sgl_elem = &preserved_msg[4];
        struct sk_buff *skb = NULL;
        u8 le_flag;

        i2o_report_status(KERN_INFO, dev->name, msg);

        /* If PacketSend failed, free sk_buffs reserved by upper layers */

        if (msg[1] >> 24 == LAN_PACKET_SEND) {
                do {
                        skb = (struct sk_buff *)(sgl_elem[1]);
                        dev_kfree_skb_irq(skb);

                        atomic_dec(&priv->tx_out);

                        le_flag = *sgl_elem >> 31;
                        sgl_elem += 3;
                } while (le_flag == 0); /* Last element flag not set */

                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        }

        /* If ReceivePost failed, free sk_buffs we have reserved */

        if (msg[1] >> 24 == LAN_RECEIVE_POST) {
                do {
                        skb = (struct sk_buff *)(sgl_elem[1]);
                        dev_kfree_skb_irq(skb);

                        atomic_dec(&priv->buckets_out);

                        le_flag = *sgl_elem >> 31;
                        sgl_elem += 3;
                } while (le_flag == 0); /* Last element flag not set */
        }

        /* Release the preserved msg frame by resubmitting it as a NOP */

        preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
        preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
        preserved_msg[2] = 0;
        i2o_post_message(iop, msg[7]);
}

/*
 * i2o_lan_handle_transaction_error(): IOP or DDM has rejected the request
 * for general cause (format error, bad function code, insufficient resources,
 * etc.). We get one transaction_error for each failed transaction.
 */
static void i2o_lan_handle_transaction_error(struct net_device *dev, u32 *msg)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct sk_buff *skb;

        i2o_report_status(KERN_INFO, dev->name, msg);

        /* If PacketSend was rejected, free sk_buff reserved by upper layers */

        if (msg[1] >> 24 == LAN_PACKET_SEND) {
                skb = (struct sk_buff *)(msg[3]); // TransactionContext
                dev_kfree_skb_irq(skb);
                atomic_dec(&priv->tx_out);

                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        }

        /* If ReceivePost was rejected, free sk_buff we have reserved */

        if (msg[1] >> 24 == LAN_RECEIVE_POST) {
                skb = (struct sk_buff *)(msg[3]);
                dev_kfree_skb_irq(skb);
                atomic_dec(&priv->buckets_out);
        }
}

/*
 * i2o_lan_handle_status(): Common parts of handling an unsuccessful request
 * (status != SUCCESS).
 */
static int i2o_lan_handle_status(struct net_device *dev, u32 *msg)
{
        /* Fail bit set? */

        if (msg[0] & MSG_FAIL) {
                i2o_lan_handle_failure(dev, msg);
                return -1;
        }

        /* Message rejected for general cause? */

        if ((msg[4] >> 24) == I2O_REPLY_STATUS_TRANSACTION_ERROR) {
                i2o_lan_handle_transaction_error(dev, msg);
                return -1;
        }

        /* Else have to handle it in the callback function */

        return 0;
}

/* Callback functions called from the interrupt routine:
=======================================================*/

/*
 * i2o_lan_send_post_reply(): Callback function to handle PacketSend replies.
 */
static void i2o_lan_send_post_reply(struct i2o_handler *h,
                        struct i2o_controller *iop, struct i2o_message *m)
{
        u32 *msg = (u32 *)m;
        u8 unit  = (u8)(msg[2]>>16); // InitiatorContext
        struct net_device *dev = i2o_landevs[unit];
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        u8 trl_count  = msg[3] & 0x000000FF;

        if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
                if (i2o_lan_handle_status(dev, msg))
                        return;
        }

#ifdef DRIVERDEBUG
        i2o_report_status(KERN_INFO, dev->name, msg);
#endif

        /* DDM has handled the transmit request(s); free the sk_buffs.
         * We get a similar single-transaction reply also in error cases
         * (except for message failures and transaction errors, handled above).
         */
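        /* msg[5]..msg[4 + trl_count] carry the TransactionContexts of the
         * completed sends, i.e. the skb pointers we stored when posting.
         */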
        while (trl_count) {
                dev_kfree_skb_irq((struct sk_buff *)msg[4 + trl_count]);
                dprintk(KERN_INFO "%s: tx skb freed (trl_count=%d).\n",
                        dev->name, trl_count);
                atomic_dec(&priv->tx_out);
                trl_count--;
        }

        /* If priv->tx_out had reached tx_max_out, the queue was stopped */

        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
}

/*
 * i2o_lan_receive_post_reply(): Callback function to process incoming packets.
 */
static void i2o_lan_receive_post_reply(struct i2o_handler *h,
                        struct i2o_controller *iop, struct i2o_message *m)
{
        u32 *msg = (u32 *)m;
        u8 unit  = (u8)(msg[2]>>16); // InitiatorContext
        struct net_device *dev = i2o_landevs[unit];

        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_bucket_descriptor *bucket = (struct i2o_bucket_descriptor *)&msg[6];
        struct i2o_packet_info *packet;
        u8 trl_count = msg[3] & 0x000000FF;
        struct sk_buff *skb, *old_skb;
        unsigned long flags = 0;

        if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
                if (i2o_lan_handle_status(dev, msg))
                        return;

                i2o_lan_release_buckets(dev, msg);
                return;
        }

#ifdef DRIVERDEBUG
        i2o_report_status(KERN_INFO, dev->name, msg);
#endif

        /* Else we are receiving incoming post. */

        while (trl_count--) {
                skb = (struct sk_buff *)bucket->context;
                packet = (struct i2o_packet_info *)bucket->packet_info;
                atomic_dec(&priv->buckets_out);

                /* Sanity checks: Any weird characteristics in bucket? */

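                /* packet_info flag bits, as used below (deduced from the
                 * messages printed here): 0x01 = packet error, 0x0c = packet
                 * continues in another bucket, 0x40 = bucket holds a single
                 * whole packet.
                 */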
                if ((packet->flags & 0x0f) || !(packet->flags & 0x40)) {
                        if (packet->flags & 0x01)
                                printk(KERN_WARNING "%s: packet with errors, error code=0x%02x.\n",
                                        dev->name, packet->status & 0xff);

                        /* The following shouldn't happen, unless parameters in
                         * LAN_OPERATION group are changed during the run time.
                         */
                        if (packet->flags & 0x0c)
                                printk(KERN_DEBUG "%s: multi-bucket packets not supported!\n",
                                        dev->name);

                        if (!(packet->flags & 0x40))
                                printk(KERN_DEBUG "%s: multiple packets in a bucket not supported!\n",
                                        dev->name);

                        dev_kfree_skb_irq(skb);

                        bucket++;
                        continue;
                }

                /* Copy short packet to a new skb */

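                /* Packets shorter than rx_copybreak are copied into a small
                 * freshly allocated skb, and the full-sized bucket is recycled
                 * through the free bucket list (fbl) instead of being consumed
                 * by a small frame.
                 */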
                if (packet->len < priv->rx_copybreak) {
                        old_skb = skb;
                        skb = (struct sk_buff *)dev_alloc_skb(packet->len+2);
                        if (skb == NULL) {
                                printk(KERN_ERR "%s: Can't allocate skb.\n", dev->name);
                                return;
                        }
                        skb_reserve(skb, 2);
                        memcpy(skb_put(skb, packet->len), old_skb->data, packet->len);

                        spin_lock_irqsave(&priv->fbl_lock, flags);
                        if (priv->i2o_fbl_tail < I2O_LAN_MAX_BUCKETS_OUT)
                                priv->i2o_fbl[++priv->i2o_fbl_tail] = old_skb;
                        else
                                dev_kfree_skb_irq(old_skb);

                        spin_unlock_irqrestore(&priv->fbl_lock, flags);
                } else
                        skb_put(skb, packet->len);

                /* Deliver to upper layers */

                skb->dev = dev;
                skb->protocol = priv->type_trans(skb, dev);
                netif_rx(skb);

                dev->last_rx = jiffies;

                dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
                        "to upper level.\n", dev->name, packet->len);

                bucket++; // to next Packet Descriptor Block
        }

#ifdef DRIVERDEBUG
        if (msg[5] == 0)
                printk(KERN_INFO "%s: DDM out of buckets (priv->count = %d)!\n",
                       dev->name, atomic_read(&priv->buckets_out));
#endif

        /* If DDM has already consumed bucket_thresh buckets, post new ones */

        if (atomic_read(&priv->buckets_out) <= priv->max_buckets_out - priv->bucket_thresh) {
                run_i2o_post_buckets_task.data = (void *)dev;
                queue_task(&run_i2o_post_buckets_task, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }

        return;
}

/*
 * i2o_lan_reply(): Callback function to handle other incoming messages
 * except PacketSend and ReceivePost.
 */
static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
                          struct i2o_message *m)
{
        u32 *msg = (u32 *)m;
        u8 unit  = (u8)(msg[2]>>16); // InitiatorContext
        struct net_device *dev = i2o_landevs[unit];

        if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
                if (i2o_lan_handle_status(dev, msg))
                        return;

                /* In other error cases just report and continue */

                i2o_report_status(KERN_INFO, dev->name, msg);
        }

#ifdef DRIVERDEBUG
        i2o_report_status(KERN_INFO, dev->name, msg);
#endif
        switch (msg[1] >> 24) {
                case LAN_RESET:
                case LAN_SUSPEND:
                        /* default reply without payload */
                break;

                case I2O_CMD_UTIL_EVT_REGISTER:
                case I2O_CMD_UTIL_EVT_ACK:
                        i2o_lan_handle_event(dev, msg);
                break;

                case I2O_CMD_UTIL_PARAMS_SET:
                        /* default reply, results in ReplyPayload (not examined) */
                        switch (msg[3] >> 16) {
                            case 1: dprintk(KERN_INFO "%s: Reply to set MAC filter mask.\n",
                                        dev->name);
                            break;
                            case 2: dprintk(KERN_INFO "%s: Reply to set MAC table.\n",
                                        dev->name);
                            break;
                            default: printk(KERN_WARNING "%s: Bad group 0x%04X\n",
                                        dev->name, msg[3] >> 16);
                        }
                break;

                default:
                        printk(KERN_ERR "%s: No handler for the reply.\n",
                                dev->name);
                        i2o_report_status(KERN_INFO, dev->name, msg);
        }
}

/* Functions used by the above callback functions:
=================================================*/

/*
 * i2o_lan_release_buckets(): Free unused buckets (sk_buffs).
 */
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        u8 trl_elem_size = (u8)(msg[3]>>8 & 0x000000FF);
        u8 trl_count = (u8)(msg[3] & 0x000000FF);
        u32 *pskb = &msg[6];

        while (trl_count--) {
                dprintk(KERN_DEBUG "%s: Releasing unused rx skb %p (trl_count=%d).\n",
                        dev->name, (struct sk_buff *)(*pskb), trl_count+1);
                dev_kfree_skb_irq((struct sk_buff *)(*pskb));
                pskb += 1 + trl_elem_size;
                atomic_dec(&priv->buckets_out);
        }
}

/*
 * i2o_lan_handle_event(): Handle events.
 */
static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u32 max_evt_data_size = iop->status_block->inbound_frame_size - 5;
        struct i2o_reply {
                u32 header[4];
                u32 evt_indicator;
                u32 data[max_evt_data_size];
        } *evt = (struct i2o_reply *)msg;
        int evt_data_len = ((msg[0]>>16) - 5) * 4; /* real size */

        printk(KERN_INFO "%s: I2O event - ", dev->name);

        if (msg[1]>>24 == I2O_CMD_UTIL_EVT_ACK) {
                printk("Event acknowledgement reply.\n");
                return;
        }

        /* Else evt->function == I2O_CMD_UTIL_EVT_REGISTER */

        switch (evt->evt_indicator) {
        case I2O_EVT_IND_STATE_CHANGE:  {
                struct state_data {
                        u16 status;
                        u8 state;
                        u8 data;
                } *evt_data = (struct state_data *)&evt->data[0];

                printk("State change 0x%08x.\n", evt->data[0]);

                /* If the DDM is in error state, recovery may be
                 * possible if status = Transmit or Receive Control
                 * Unit Inoperable.
                 */
                if (evt_data->state == 0x05 && evt_data->status == 0x0003)
                        i2o_lan_reset(dev);
                break;
        }

        case I2O_EVT_IND_FIELD_MODIFIED: {
                u16 *work16 = (u16 *)evt->data;
                printk("Group 0x%04x, field %d changed.\n", work16[0], work16[1]);
                break;
        }

        case I2O_EVT_IND_VENDOR_EVT: {
                int i;
                printk("Vendor event:\n");
                for (i = 0; i < evt_data_len / 4; i++)
                        printk("   0x%08x\n", evt->data[i]);
                break;
        }

        case I2O_EVT_IND_DEVICE_RESET:
                /* Spec 2.0 p. 6-121: a _DEVICE_RESET event must also
                 * be acknowledged.
                 */
                printk("Device reset.\n");
                if (i2o_event_ack(iop, msg) < 0)
                        printk("%s: Event Acknowledge timeout.\n", dev->name);
                break;

#if 0
        case I2O_EVT_IND_EVT_MASK_MODIFIED:
                printk("Event mask modified, 0x%08x.\n", evt->data[0]);
                break;

        case I2O_EVT_IND_GENERAL_WARNING:
                printk("General warning 0x%04x.\n", evt->data[0]);
                break;

        case I2O_EVT_IND_CONFIGURATION_FLAG:
                printk("Configuration requested.\n");
                break;

        case I2O_EVT_IND_CAPABILITY_CHANGE:
                printk("Capability change 0x%04x.\n", evt->data[0]);
                break;

        case I2O_EVT_IND_DEVICE_STATE:
                printk("Device state changed 0x%08x.\n", evt->data[0]);
                break;
#endif
        case I2O_LAN_EVT_LINK_DOWN:
                netif_carrier_off(dev);
                printk("Link to the physical device is lost.\n");
                break;

        case I2O_LAN_EVT_LINK_UP:
                netif_carrier_on(dev);
                printk("Link to the physical device is (re)established.\n");
                break;

        case I2O_LAN_EVT_MEDIA_CHANGE:
                printk("Media change.\n");
                break;

        default:
                printk("0x%08x. No handler.\n", evt->evt_indicator);
        }
}

/*
 * i2o_lan_receive_post(): Post buckets to receive packets.
 */
static int i2o_lan_receive_post(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        struct sk_buff *skb;
        u32 m, *msg;
        u32 bucket_len = (dev->mtu + dev->hard_header_len);
        u32 total = priv->max_buckets_out - atomic_read(&priv->buckets_out);
        u32 bucket_count;
        u32 *sgl_elem;
        unsigned long flags;

        /* Post the buckets in batches of at most priv->sgl_max per
         * I2O request.
         */

        while (total) {
                m = I2O_POST_READ32(iop);
                if (m == 0xFFFFFFFF)
                        return -ETIMEDOUT;
                msg = (u32 *)(iop->mem_offset + m);

                bucket_count = (total >= priv->sgl_max) ? priv->sgl_max : total;
                total -= bucket_count;
                atomic_add(bucket_count, &priv->buckets_out);

                dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN DDM.\n",
                        dev->name, bucket_count, bucket_len);

                /* Fill in the header */

                __raw_writel(I2O_MESSAGE_SIZE(4 + 3 * bucket_count) | SGL_OFFSET_4, msg);
                __raw_writel(LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
                __raw_writel(priv->unit << 16 | lan_receive_context, msg+2);
                __raw_writel(bucket_count, msg+3);
                sgl_elem = &msg[4];

                /* Fill in the payload - contains bucket_count SGL elements */

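                /* Each SGL element is three words: word 0 holds the SGL flag
                 * bits plus the buffer length, word 1 is the TransactionContext
                 * (the skb pointer is stored there), and word 2 is the bus
                 * address of the data buffer. Bit 31 of the flag word marks
                 * the last element and is set after the loop.
                 */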
                while (bucket_count--) {
                        spin_lock_irqsave(&priv->fbl_lock, flags);
                        if (priv->i2o_fbl_tail >= 0)
                                skb = priv->i2o_fbl[priv->i2o_fbl_tail--];
                        else {
                                skb = dev_alloc_skb(bucket_len + 2);
                                if (skb == NULL) {
                                        spin_unlock_irqrestore(&priv->fbl_lock, flags);
                                        return -ENOMEM;
                                }
                                skb_reserve(skb, 2);
                        }
                        spin_unlock_irqrestore(&priv->fbl_lock, flags);

                        __raw_writel(0x51000000 | bucket_len, sgl_elem);
                        __raw_writel((u32)skb,                sgl_elem+1);
                        __raw_writel(virt_to_bus(skb->data),  sgl_elem+2);
                        sgl_elem += 3;
                }

                /* set LE flag and post */
                __raw_writel(__raw_readl(sgl_elem-3) | 0x80000000, (sgl_elem-3));
                i2o_post_message(iop, m);
        }

        return 0;
}

/* Functions called from the network stack, and functions called by them:
========================================================================*/

/*
 * i2o_lan_reset(): Reset the LAN adapter into the operational state and
 *      restore it to full operation.
 */
static int i2o_lan_reset(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u32 msg[5];

        dprintk(KERN_INFO "%s: LAN RESET MESSAGE.\n", dev->name);
        msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
        msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
        msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
        msg[3] = 0;                              // TransactionContext
        msg[4] = 0;                              // Keep posted buckets

        if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
                return -ETIMEDOUT;

        return 0;
}

/*
 * i2o_lan_suspend(): Put LAN adapter into a safe, non-active state.
 *      IOP replies to any LAN class message with status error_no_data_transfer
 *      / suspended.
 */
static int i2o_lan_suspend(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u32 msg[5];

        dprintk(KERN_INFO "%s: LAN SUSPEND MESSAGE.\n", dev->name);
        msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
        msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
        msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
        msg[3] = 0;                              // TransactionContext
        msg[4] = 1 << 16;                        // return posted buckets

        if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
                return -ETIMEDOUT;

        return 0;
}

/*
 * i2o_set_ddm_parameters():
 * These settings are done to ensure proper initial values for the DDM.
 * They can be changed via the /proc filesystem or via a configuration utility.
 */
static void i2o_set_ddm_parameters(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u32 val;

        /*
         * When PacketOrphanLimit is set to the maximum packet length,
         * the packets will never be split into two separate buckets
         */
        val = dev->mtu + dev->hard_header_len;
        if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 2, &val, sizeof(val)) < 0)
                printk(KERN_WARNING "%s: Unable to set PacketOrphanLimit.\n",
                       dev->name);
        else
                dprintk(KERN_INFO "%s: PacketOrphanLimit set to %d.\n",
                        dev->name, val);

        /* When RxMaxPacketsBucket = 1, DDM puts only one packet into bucket */

        val = 1;
        if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0008, 4, &val, sizeof(val)) < 0)
                printk(KERN_WARNING "%s: Unable to set RxMaxPacketsBucket.\n",
                       dev->name);
        else
                dprintk(KERN_INFO "%s: RxMaxPacketsBucket set to %d.\n",
                        dev->name, val);
        return;
}

/* Functions called from the network stack:
==========================================*/

/*
 * i2o_lan_open(): Open the device to send/receive packets via
 * the network device.
 */
static int i2o_lan_open(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u32 mc_addr_group[64];

        MOD_INC_USE_COUNT;

        if (i2o_claim_device(i2o_dev, &i2o_lan_handler)) {
                printk(KERN_WARNING "%s: Unable to claim the I2O LAN device.\n", dev->name);
                MOD_DEC_USE_COUNT;
                return -EAGAIN;
        }
        dprintk(KERN_INFO "%s: I2O LAN device (tid=%d) claimed by LAN OSM.\n",
                dev->name, i2o_dev->lct_data.tid);

        if (i2o_event_register(iop, i2o_dev->lct_data.tid,
                               priv->unit << 16 | lan_context, 0, priv->i2o_event_mask) < 0)
                printk(KERN_WARNING "%s: Unable to set the event mask.\n", dev->name);

        i2o_lan_reset(dev);

        /* Get the max number of multicast addresses */

        if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0001, -1,
                             &mc_addr_group, sizeof(mc_addr_group)) < 0) {
                printk(KERN_WARNING "%s: Unable to query LAN_MAC_ADDRESS group.\n", dev->name);
                MOD_DEC_USE_COUNT;
                return -EAGAIN;
        }
        priv->max_size_mc_table = mc_addr_group[8];

        /* Allocate space for the free bucket list, used to reuse
         * ReceivePost buckets
         */

        priv->i2o_fbl = kmalloc(priv->max_buckets_out * sizeof(struct sk_buff *),
                                GFP_KERNEL);
        if (priv->i2o_fbl == NULL) {
                MOD_DEC_USE_COUNT;
                return -ENOMEM;
        }
        priv->i2o_fbl_tail = -1;
        priv->send_active = 0;

        i2o_set_ddm_parameters(dev);
        i2o_lan_receive_post(dev);

        netif_start_queue(dev);

        return 0;
}

/*
 * i2o_lan_close(): Stop the transfer and release the device.
 */
static int i2o_lan_close(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        int ret = 0;

        netif_stop_queue(dev);
        i2o_lan_suspend(dev);

        if (i2o_event_register(iop, i2o_dev->lct_data.tid,
                               priv->unit << 16 | lan_context, 0, 0) < 0)
                printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
                       dev->name);

        while (priv->i2o_fbl_tail >= 0)
                dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

        kfree(priv->i2o_fbl);

        if (i2o_release_device(i2o_dev, &i2o_lan_handler)) {
                printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device "
                       "(tid=%d).\n", dev->name, i2o_dev->lct_data.tid);
                ret = -EBUSY;
        }

        MOD_DEC_USE_COUNT;

        return ret;
}

/*
 * i2o_lan_tx_timeout(): Tx timeout handler.
 */
static void i2o_lan_tx_timeout(struct net_device *dev)
{
        if (!netif_queue_stopped(dev))
                netif_start_queue(dev);
}

/*
 * i2o_lan_batch_send(): Send packets in batch.
 * Both i2o_lan_sdu_send and i2o_lan_packet_send use this.
 */
static void i2o_lan_batch_send(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_controller *iop = priv->i2o_dev->controller;

        spin_lock_irq(&priv->tx_lock);
        if (priv->tx_count != 0) {
                dev->trans_start = jiffies;
                i2o_post_message(iop, priv->m);
                dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
                priv->tx_count = 0;
        }
        priv->send_active = 0;
        spin_unlock_irq(&priv->tx_lock);
        MOD_DEC_USE_COUNT;
}

#ifdef CONFIG_NET_FC
/*
 * i2o_lan_sdu_send(): Send a packet, MAC header added by the DDM.
 * Must be supported by Fibre Channel, optional for Ethernet/802.3,
 * Token Ring, FDDI
 */
static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        int tickssofar = jiffies - dev->trans_start;
        u32 m, *msg;
        u32 *sgl_elem;

        spin_lock_irq(&priv->tx_lock);

        priv->tx_count++;
        atomic_inc(&priv->tx_out);

        /*
         * If tx_batch_mode = 0x00, forced to immediate mode
         * If tx_batch_mode = 0x01, forced to batch mode
         * If tx_batch_mode = 0x02, automatic switching, current mode immediate
         * If tx_batch_mode = 0x03, automatic switching, current mode batch
         *      In automatic mode: if the gap between two packets
         *      is > 0 ticks, switch to immediate
         */
        if (priv->tx_batch_mode >> 1) // switch automatically
                priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

        if (priv->tx_count == 1) {
                m = I2O_POST_READ32(iop);
                if (m == 0xFFFFFFFF) {
                        spin_unlock_irq(&priv->tx_lock);
                        return 1;
                }
                msg = (u32 *)(iop->mem_offset + m);
                priv->m = m;
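                /* priv->m keeps the offset of this still unposted frame so
                 * that follow-up packets and the deferred batch-send task
                 * can extend and finally post the same frame.
                 */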

                __raw_writel(NINE_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
                __raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
                __raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
                __raw_writel(1 << 30 | 1 << 3, msg+3);                    // TransmitControlWord

                __raw_writel(0xD7000000 | skb->len, msg+4);          // MAC hdr included
                __raw_writel((u32)skb, msg+5);                       // TransactionContext
                __raw_writel(virt_to_bus(skb->data), msg+6);
                __raw_writel((u32)skb->mac.raw, msg+7);
                __raw_writel((u32)skb->mac.raw+4, msg+8);

                if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
                        priv->send_active = 1;
                        MOD_INC_USE_COUNT;
                        if (schedule_task(&priv->i2o_batch_send_task) == 0)
                                MOD_DEC_USE_COUNT;
                }
        } else {  /* Add new SGL element to the previous message frame */

                msg = (u32 *)(iop->mem_offset + priv->m);
                sgl_elem = &msg[priv->tx_count * 5 + 1];

                __raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 5) | 1<<12 | SGL_OFFSET_4, msg);
                __raw_writel(__raw_readl(sgl_elem-5) & 0x7FFFFFFF, sgl_elem-5); /* clear LE flag */
                __raw_writel(0xD5000000 | skb->len, sgl_elem);
                __raw_writel((u32)skb, sgl_elem+1);
                __raw_writel(virt_to_bus(skb->data), sgl_elem+2);
                __raw_writel((u32)(skb->mac.raw), sgl_elem+3);
                __raw_writel((u32)(skb->mac.raw)+1, sgl_elem+4);
        }

        /* If tx not in batch mode or frame is full, send immediately */

        if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
                dev->trans_start = jiffies;
                i2o_post_message(iop, priv->m);
                dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
                priv->tx_count = 0;
        }

        /* If the DDM's TxMaxPktOut is reached, stop the queueing layer
         * from sending more
         */

        if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
                netif_stop_queue(dev);

        spin_unlock_irq(&priv->tx_lock);
        return 0;
}
#endif /* CONFIG_NET_FC */

/*
 * i2o_lan_packet_send(): Send a packet as is, including the MAC header.
 *
 * Must be supported by Ethernet/802.3, Token Ring, FDDI, optional for
 * Fibre Channel
 */
static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        int tickssofar = jiffies - dev->trans_start;
        u32 m, *msg;
        u32 *sgl_elem;

        spin_lock_irq(&priv->tx_lock);

        priv->tx_count++;
        atomic_inc(&priv->tx_out);

        /*
         * If tx_batch_mode = 0x00, forced to immediate mode
         * If tx_batch_mode = 0x01, forced to batch mode
         * If tx_batch_mode = 0x02, automatic switching, current mode immediate
         * If tx_batch_mode = 0x03, automatic switching, current mode batch
         *      In automatic mode: if the gap between two packets
         *      is > 0 ticks, switch to immediate
         */
        if (priv->tx_batch_mode >> 1) // switch automatically
                priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

        if (priv->tx_count == 1) {
                m = I2O_POST_READ32(iop);
                if (m == 0xFFFFFFFF) {
                        spin_unlock_irq(&priv->tx_lock);
                        return 1;
                }
                msg = (u32 *)(iop->mem_offset + m);
                priv->m = m;

                __raw_writel(SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
                __raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
                __raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
                __raw_writel(1 << 30 | 1 << 3, msg+3);                    // TransmitControlWord
                        // bit 30: reply as soon as transmission attempt is complete
                        // bit 3: Suppress CRC generation
                __raw_writel(0xD5000000 | skb->len, msg+4);          // MAC hdr included
                __raw_writel((u32)skb, msg+5);                       // TransactionContext
                __raw_writel(virt_to_bus(skb->data), msg+6);

                if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
                        priv->send_active = 1;
                        MOD_INC_USE_COUNT;
                        if (schedule_task(&priv->i2o_batch_send_task) == 0)
                                MOD_DEC_USE_COUNT;
                }
        } else {  /* Add new SGL element to the previous message frame */

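                /* The frame claimed by the first packet is still unposted:
                 * grow its size field by three words, clear the LE bit
                 * (bit 31) of the previous last SGL element, and append a
                 * new element for this skb (0xD5000000 keeps the LE bit set
                 * on the new tail).
                 */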
                msg = (u32 *)(iop->mem_offset + priv->m);
                sgl_elem = &msg[priv->tx_count * 3 + 1];

                __raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 3) | 1<<12 | SGL_OFFSET_4, msg);
                __raw_writel(__raw_readl(sgl_elem-3) & 0x7FFFFFFF, sgl_elem-3); /* clear LE flag */
                __raw_writel(0xD5000000 | skb->len, sgl_elem);
                __raw_writel((u32)skb, sgl_elem+1);
                __raw_writel(virt_to_bus(skb->data), sgl_elem+2);
        }

        /* If tx is in immediate mode or frame is full, send now */

        if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
                dev->trans_start = jiffies;
                i2o_post_message(iop, priv->m);
                dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
                priv->tx_count = 0;
        }

        /* If the DDM's TxMaxPktOut is reached, stop the queueing layer
         * from sending more
         */

        if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
                netif_stop_queue(dev);

        spin_unlock_irq(&priv->tx_lock);
        return 0;
}

/*
 * i2o_lan_get_stats(): Fill in the statistics.
 */
static struct net_device_stats *i2o_lan_get_stats(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u64 val64[16];
        u64 supported_group[4] = { 0, 0, 0, 0 };

        if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0100, -1, val64,
                             sizeof(val64)) < 0)
                printk(KERN_INFO "%s: Unable to query LAN_HISTORICAL_STATS.\n", dev->name);
        else {
                dprintk(KERN_DEBUG "%s: LAN_HISTORICAL_STATS queried.\n", dev->name);
                priv->stats.tx_packets = val64[0];
                priv->stats.tx_bytes   = val64[1];
                priv->stats.rx_packets = val64[2];
                priv->stats.rx_bytes   = val64[3];
                priv->stats.tx_errors  = val64[4];
                priv->stats.rx_errors  = val64[5];
                priv->stats.rx_dropped = val64[6];
        }

        if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0180, -1,
                             &supported_group, sizeof(supported_group)) < 0)
                printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_OPTIONAL_HISTORICAL_STATS.\n", dev->name);

        if (supported_group[2]) {
                if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0183, -1,
                                     val64, sizeof(val64)) < 0)
                        printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_RX_HISTORICAL_STATS.\n", dev->name);
                else {
                        dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_RX_HISTORICAL_STATS queried.\n", dev->name);
                        priv->stats.multicast        = val64[4];
                        priv->stats.rx_length_errors = val64[10];
                        priv->stats.rx_crc_errors    = val64[0];
                }
        }

        if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET) {
                u64 supported_stats = 0;
                if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0200, -1,
                                     val64, sizeof(val64)) < 0)
                        printk(KERN_INFO "%s: Unable to query LAN_802_3_HISTORICAL_STATS.\n", dev->name);
                else {
                        dprintk(KERN_DEBUG "%s: LAN_802_3_HISTORICAL_STATS queried.\n", dev->name);
                        priv->stats.transmit_collision = val64[1] + val64[2];
                        priv->stats.rx_frame_errors    = val64[0];
                        priv->stats.tx_carrier_errors  = val64[6];
                }

                if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0280, -1,
                                     &supported_stats, sizeof(supported_stats)) < 0)
                        printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_802_3_HISTORICAL_STATS.\n", dev->name);

                if (supported_stats != 0) {
                        if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0281, -1,
                                             val64, sizeof(val64)) < 0)
                                printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_802_3_HISTORICAL_STATS.\n", dev->name);
                        else {
                                dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_802_3_HISTORICAL_STATS queried.\n", dev->name);
                                if (supported_stats & 0x1)
                                        priv->stats.rx_over_errors = val64[0];
                                if (supported_stats & 0x4)
                                        priv->stats.tx_heartbeat_errors = val64[2];
                        }
                }
        }

#ifdef CONFIG_TR
        if (i2o_dev->lct_data.sub_class == I2O_LAN_TR) {
                if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0300, -1,
                                     val64, sizeof(val64)) < 0)
                        printk(KERN_INFO "%s: Unable to query LAN_802_5_HISTORICAL_STATS.\n", dev->name);
                else {
                        struct tr_statistics *stats =
                                (struct tr_statistics *)&priv->stats;
                        dprintk(KERN_DEBUG "%s: LAN_802_5_HISTORICAL_STATS queried.\n", dev->name);

                        stats->line_errors              = val64[0];
                        stats->internal_errors          = val64[7];
                        stats->burst_errors             = val64[4];
                        stats->A_C_errors               = val64[2];
                        stats->abort_delimiters         = val64[3];
                        stats->lost_frames              = val64[1];
                        /* stats->recv_congest_count    = ?;  FIXME ?? */
                        stats->frame_copied_errors      = val64[5];
                        stats->frequency_errors         = val64[6];
                        stats->token_errors             = val64[9];
                }
                /* Token Ring optional stats not yet defined */
        }
#endif

#ifdef CONFIG_FDDI
        if (i2o_dev->lct_data.sub_class == I2O_LAN_FDDI) {
                if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0400, -1,
                                     val64, sizeof(val64)) < 0)
                        printk(KERN_INFO "%s: Unable to query LAN_FDDI_HISTORICAL_STATS.\n", dev->name);
                else {
                        dprintk(KERN_DEBUG "%s: LAN_FDDI_HISTORICAL_STATS queried.\n", dev->name);
                        priv->stats.smt_cf_state = val64[0];
                        memcpy(priv->stats.mac_upstream_nbr, &val64[1], FDDI_K_ALEN);
                        memcpy(priv->stats.mac_downstream_nbr, &val64[2], FDDI_K_ALEN);
                        priv->stats.mac_error_cts = val64[3];
                        priv->stats.mac_lost_cts  = val64[4];
                        priv->stats.mac_rmt_state = val64[5];
                        memcpy(priv->stats.port_lct_fail_cts, &val64[6], 8);
                        memcpy(priv->stats.port_lem_reject_cts, &val64[7], 8);
                        memcpy(priv->stats.port_lem_cts, &val64[8], 8);
                        memcpy(priv->stats.port_pcm_state, &val64[9], 8);
                }
                /* FDDI optional stats not yet defined */
        }
#endif

#ifdef CONFIG_NET_FC
        /* Fibre Channel Statistics not yet defined in 1.53 nor 2.0 */
#endif

        return (struct net_device_stats *)&priv->stats;
}

/*
 * i2o_lan_set_mc_filter(): Post a request to set multicast filter.
 */
int i2o_lan_set_mc_filter(struct net_device *dev, u32 filter_mask)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        u32 msg[10];

        msg[0] = TEN_WORD_MSG_SIZE | SGL_OFFSET_5;
        msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
        msg[2] = priv->unit << 16 | lan_context;
        msg[3] = 0x0001 << 16 | 3;                      // TransactionContext: group & field
        msg[4] = 0;
        msg[5] = 0xCC000000 | 16;                       // Immediate data SGL
        msg[6] = 1;                                     // OperationCount
        msg[7] = 0x0001<<16 | I2O_PARAMS_FIELD_SET;     // Group, Operation
        msg[8] = 3 << 16 | 1;                           // FieldIndex, FieldCount
        msg[9] = filter_mask;                           // Value

        return i2o_post_this(iop, msg, sizeof(msg));
}

/*
 * i2o_lan_set_mc_table(): Post a request to set LAN_MULTICAST_MAC_ADDRESS table.
 */
int i2o_lan_set_mc_table(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        struct i2o_controller *iop = i2o_dev->controller;
        struct dev_mc_list *mc;
        u32 msg[10 + 2 * dev->mc_count];
        u8 *work8 = (u8 *)(msg + 10);
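
        /* msg[] is a variable-length array: each multicast address takes
         * one 8-byte (two-word) row appended after the 10-word header,
         * filled in via work8 below.
         */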

        msg[0] = I2O_MESSAGE_SIZE(10 + 2 * dev->mc_count) | SGL_OFFSET_5;
        msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
        msg[2] = priv->unit << 16 | lan_context;        // InitiatorContext
        msg[3] = 0x0002 << 16 | (u16)-1;                // TransactionContext
        msg[4] = 0;                                     // OperationFlags
        msg[5] = 0xCC000000 | (16 + 8 * dev->mc_count); // Immediate data SGL
        msg[6] = 2;                                     // OperationCount
        msg[7] = 0x0002 << 16 | I2O_PARAMS_TABLE_CLEAR; // Group, Operation
        msg[8] = 0x0002 << 16 | I2O_PARAMS_ROW_ADD;     // Group, Operation
        msg[9] = dev->mc_count << 16 | (u16)-1;         // RowCount, FieldCount

        for (mc = dev->mc_list; mc; mc = mc->next, work8 += 8) {
                memset(work8, 0, 8);
                memcpy(work8, mc->dmi_addr, mc->dmi_addrlen); // Values
        }

        return i2o_post_this(iop, msg, sizeof(msg));
}

/*
 * i2o_lan_set_multicast_list(): Enable a network device to receive packets
 *      not sent to the protocol address.
 */
static void i2o_lan_set_multicast_list(struct net_device *dev)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        u32 filter_mask;

        if (dev->flags & IFF_PROMISC) {
                filter_mask = 0x00000002;
                dprintk(KERN_INFO "%s: Enabling promiscuous mode...\n", dev->name);
        } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > priv->max_size_mc_table) {
                filter_mask = 0x00000004;
                dprintk(KERN_INFO "%s: Enabling all multicast mode...\n", dev->name);
        } else if (dev->mc_count) {
                filter_mask = 0x00000000;
                dprintk(KERN_INFO "%s: Enabling multicast mode...\n", dev->name);
                if (i2o_lan_set_mc_table(dev) < 0)
                        printk(KERN_WARNING "%s: Unable to send MAC table.\n", dev->name);
        } else {
                filter_mask = 0x00000300; // Broadcast, Multicast disabled
                dprintk(KERN_INFO "%s: Enabling unicast mode...\n", dev->name);
        }

        /* Finally copy new FilterMask to DDM */

        if (i2o_lan_set_mc_filter(dev, filter_mask) < 0)
                printk(KERN_WARNING "%s: Unable to send MAC FilterMask.\n", dev->name);
}

/*
 * i2o_lan_change_mtu(): Change maximum transfer unit size.
 */
static int i2o_lan_change_mtu(struct net_device *dev, int new_mtu)
{
        struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
        struct i2o_device *i2o_dev = priv->i2o_dev;
        u32 max_pkt_size;

        if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
                             0x0000, 6, &max_pkt_size, 4) < 0)
                return -EFAULT;

        if (new_mtu < 68 || new_mtu > 9000 || new_mtu > max_pkt_size)
                return -EINVAL;

        dev->mtu = new_mtu;

        i2o_lan_suspend(dev);           // to SUSPENDED state, return buckets

        while (priv->i2o_fbl_tail >= 0) // free buffered buckets
                dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

        i2o_lan_reset(dev);             // to OPERATIONAL state
        i2o_set_ddm_parameters(dev);    // reset some parameters
        i2o_lan_receive_post(dev);      // post new buckets (new size)

        return 0;
}

/* Functions to initialize I2O LAN OSM:
======================================*/

/*
 * i2o_lan_register_device(): Register LAN class device to kernel.
 */
1277 struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
1278 {
1279         struct net_device *dev = NULL;
1280         struct i2o_lan_local *priv = NULL;
1281         u8 hw_addr[8];
1282         u32 tx_max_out = 0;
1283         unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
1284         void (*unregister_dev)(struct net_device *dev);
1285
1286         switch (i2o_dev->lct_data.sub_class) {
1287         case I2O_LAN_ETHERNET:
1288                 dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
1289                 if (dev == NULL)
1290                         return NULL;
1291                 type_trans = eth_type_trans;
1292                 unregister_dev = unregister_netdev;
1293                 break;
1294
1295 #ifdef CONFIG_ANYLAN
1296         case I2O_LAN_100VG:
1297                 printk(KERN_ERR "i2o_lan: 100base VG not yet supported.\n");
1298                 return NULL;
1299                 break;
1300 #endif
1301
1302 #ifdef CONFIG_TR
1303         case I2O_LAN_TR:
1304                 dev = init_trdev(NULL, sizeof(struct i2o_lan_local));
1305                 if (dev==NULL)
1306                         return NULL;
1307                 type_trans = tr_type_trans;
1308                 unregister_dev = unregister_trdev;
1309                 break;
1310 #endif
1311

#ifdef CONFIG_FDDI
        case I2O_LAN_FDDI:
        {
                int size = sizeof(struct net_device) + sizeof(struct i2o_lan_local);

                dev = (struct net_device *) kmalloc(size, GFP_KERNEL);
                if (dev == NULL)
                        return NULL;
                memset(dev, 0, size);
                dev->priv = (void *)(dev + 1);

                if (dev_alloc_name(dev, "fddi%d") < 0) {
                        printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
                        kfree(dev);
                        return NULL;
                }
                type_trans = fddi_type_trans;
                unregister_dev = unregister_netdev;

                fddi_setup(dev);
                if (register_netdev(dev) < 0) {
                        printk(KERN_ERR "i2o_lan: Unable to register FDDI device.\n");
                        kfree(dev);
                        return NULL;
                }
        }
        break;
#endif

#ifdef CONFIG_NET_FC
        case I2O_LAN_FIBRE_CHANNEL:
                dev = init_fcdev(NULL, sizeof(struct i2o_lan_local));
                if (dev == NULL)
                        return NULL;
                type_trans = NULL;
/* FIXME: Move fc_type_trans() from drivers/net/fc/iph5526.c to net/802/fc.c
 * and export it in include/linux/fcdevice.h
 *              type_trans = fc_type_trans;
 */
                unregister_dev = (void *)unregister_fcdev;
                break;
#endif

        case I2O_LAN_UNKNOWN:
        default:
                printk(KERN_ERR "i2o_lan: LAN type 0x%04x not supported.\n",
                       i2o_dev->lct_data.sub_class);
                return NULL;
        }

        priv = (struct i2o_lan_local *)dev->priv;
        priv->i2o_dev = i2o_dev;
        priv->type_trans = type_trans;
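        /* Upper bound on scatter-gather elements per outbound message:
         * each simple SG element occupies three 32-bit words, and a few
         * words of every frame are reserved for the message header (the
         * exact constants follow the I2O message-frame layout).
         */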
        priv->sgl_max = (i2o_dev->controller->status_block->inbound_frame_size - 4) / 3;
        atomic_set(&priv->buckets_out, 0);

        /* Set default values for user configurable parameters */
        /* Private values are changed via /proc file system */

        priv->max_buckets_out = max_buckets_out;
        priv->bucket_thresh   = bucket_thresh;
        priv->rx_copybreak    = rx_copybreak;
        priv->tx_batch_mode   = tx_batch_mode & 0x03;
        priv->i2o_event_mask  = i2o_event_mask;

        spin_lock_init(&priv->tx_lock);
        spin_lock_init(&priv->fbl_lock);

        unit++;
        i2o_landevs[unit] = dev;
        priv->unit = unit;
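
        /* Fetch the port's current hardware (MAC) address from the DDM. */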
        if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
                             0x0001, 0, &hw_addr, sizeof(hw_addr)) < 0) {
                printk(KERN_ERR "%s: Unable to query hardware address.\n", dev->name);
                i2o_landevs[unit--] = NULL;
                unregister_dev(dev);
                kfree(dev);
                return NULL;
        }
        dprintk(KERN_DEBUG "%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
                dev->name, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
                hw_addr[4], hw_addr[5]);

        dev->addr_len = 6;
        memcpy(dev->dev_addr, hw_addr, 6);

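        /* Ask the DDM how many TX requests it allows outstanding at once. */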
        if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
                             0x0007, 2, &tx_max_out, sizeof(tx_max_out)) < 0) {
                printk(KERN_ERR "%s: Unable to query max TX queue.\n", dev->name);
                i2o_landevs[unit--] = NULL;
                unregister_dev(dev);
                kfree(dev);
                return NULL;
        }
        dprintk(KERN_INFO "%s: Max TX Outstanding = %d.\n", dev->name, tx_max_out);
        priv->tx_max_out = tx_max_out;
        atomic_set(&priv->tx_out, 0);
        priv->tx_count = 0;

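        /* Task used when TX batching is enabled: when queued, it runs
         * i2o_lan_batch_send() with this net_device as its argument to
         * flush any accumulated frames.
         */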
        INIT_LIST_HEAD(&priv->i2o_batch_send_task.list);
        priv->i2o_batch_send_task.sync    = 0;
        priv->i2o_batch_send_task.routine = (void *)i2o_lan_batch_send;
        priv->i2o_batch_send_task.data    = (void *)dev;

        dev->open               = i2o_lan_open;
        dev->stop               = i2o_lan_close;
        dev->get_stats          = i2o_lan_get_stats;
        dev->set_multicast_list = i2o_lan_set_multicast_list;
        dev->tx_timeout         = i2o_lan_tx_timeout;
        dev->watchdog_timeo     = I2O_LAN_TX_TIMEOUT;

#ifdef CONFIG_NET_FC
        if (i2o_dev->lct_data.sub_class == I2O_LAN_FIBRE_CHANNEL)
                dev->hard_start_xmit = i2o_lan_sdu_send;
        else
#endif
                dev->hard_start_xmit = i2o_lan_packet_send;

        if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET)
                dev->change_mtu = i2o_lan_change_mtu;

        return dev;
}

#ifdef MODULE
#define i2o_lan_init    init_module
#endif

int __init i2o_lan_init(void)
{
        struct net_device *dev;
        int i;

        printk(KERN_INFO "I2O LAN OSM (C) 1999, 2000 University of Helsinki.\n");

        /* Module params are used as global defaults for private values */

        if (max_buckets_out > I2O_LAN_MAX_BUCKETS_OUT)
                max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
        if (bucket_thresh > max_buckets_out)
                bucket_thresh = max_buckets_out;

        /* Install handlers for incoming replies; on failure, remove any
         * handlers already installed so nothing is leaked. */

        if (i2o_install_handler(&i2o_lan_send_handler) < 0) {
                printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM send handler.\n");
                return -EINVAL;
        }
        lan_send_context = i2o_lan_send_handler.context;

        if (i2o_install_handler(&i2o_lan_receive_handler) < 0) {
                printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM receive handler.\n");
                i2o_remove_handler(&i2o_lan_send_handler);
                return -EINVAL;
        }
        lan_receive_context = i2o_lan_receive_handler.context;

        if (i2o_install_handler(&i2o_lan_handler) < 0) {
                printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM event handler.\n");
                i2o_remove_handler(&i2o_lan_receive_handler);
                i2o_remove_handler(&i2o_lan_send_handler);
                return -EINVAL;
        }
        lan_context = i2o_lan_handler.context;

        for (i = 0; i <= MAX_LAN_CARDS; i++)
                i2o_landevs[i] = NULL;

        for (i = 0; i < MAX_I2O_CONTROLLERS; i++) {
                struct i2o_controller *iop = i2o_find_controller(i);
                struct i2o_device *i2o_dev;

                if (iop == NULL)
                        continue;

                for (i2o_dev = iop->devices; i2o_dev != NULL; i2o_dev = i2o_dev->next) {

                        if (i2o_dev->lct_data.class_id != I2O_CLASS_LAN)
                                continue;

                        /* Make sure the device is not already claimed by an ISM */
                        if (i2o_dev->lct_data.user_tid != 0xFFF)
                                continue;

                        if (unit == MAX_LAN_CARDS) {
                                i2o_unlock_controller(iop);
                                printk(KERN_WARNING "i2o_lan: Too many I2O LAN devices.\n");
                                goto out;       /* keep the interfaces registered so far */
                        }

                        dev = i2o_lan_register_device(i2o_dev);
                        if (dev == NULL) {
                                printk(KERN_ERR "i2o_lan: Unable to register I2O LAN device 0x%04x.\n",
                                       i2o_dev->lct_data.sub_class);
                                continue;
                        }

                        printk(KERN_INFO "%s: I2O LAN device registered, "
                                "subclass = 0x%04x, unit = %d, tid = %d.\n",
                                dev->name, i2o_dev->lct_data.sub_class,
                                ((struct i2o_lan_local *)dev->priv)->unit,
                                i2o_dev->lct_data.tid);
                }

                i2o_unlock_controller(iop);
        }

out:
        dprintk(KERN_INFO "%d I2O LAN devices found and registered.\n", unit+1);

        return 0;
}

#ifdef MODULE

void cleanup_module(void)
{
        int i;

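        /* Tear down in reverse order of initialization: unregister and
         * free each LAN device first, then remove the reply handlers.
         */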
        for (i = 0; i <= unit; i++) {
                struct net_device *dev = i2o_landevs[i];
                struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
                struct i2o_device *i2o_dev = priv->i2o_dev;

                switch (i2o_dev->lct_data.sub_class) {
                case I2O_LAN_ETHERNET:
                        unregister_netdev(dev);
                        break;
#ifdef CONFIG_FDDI
                case I2O_LAN_FDDI:
                        unregister_netdev(dev);
                        break;
#endif
#ifdef CONFIG_TR
                case I2O_LAN_TR:
                        unregister_trdev(dev);
                        break;
#endif
#ifdef CONFIG_NET_FC
                case I2O_LAN_FIBRE_CHANNEL:
                        unregister_fcdev(dev);
                        break;
#endif
                default:
                        printk(KERN_WARNING "%s: Spurious I2O LAN subclass 0x%04x.\n",
                               dev->name, i2o_dev->lct_data.sub_class);
                }

                dprintk(KERN_INFO "%s: I2O LAN device unregistered.\n",
                        dev->name);
                kfree(dev);
        }

        i2o_remove_handler(&i2o_lan_handler);
        i2o_remove_handler(&i2o_lan_send_handler);
        i2o_remove_handler(&i2o_lan_receive_handler);
}

EXPORT_NO_SYMBOLS;

MODULE_AUTHOR("University of Helsinki, Department of Computer Science");
MODULE_DESCRIPTION("I2O LAN OSM");

MODULE_PARM(max_buckets_out, "i");
MODULE_PARM_DESC(max_buckets_out, "Total number of buckets to post (1 - I2O_LAN_MAX_BUCKETS_OUT)");
MODULE_PARM(bucket_thresh, "i");
MODULE_PARM_DESC(bucket_thresh, "Bucket post threshold (1 - max_buckets_out)");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint: received frames shorter than this are copied (1-)");
MODULE_PARM(tx_batch_mode, "b");
MODULE_PARM_DESC(tx_batch_mode, "0=Send immediately, 1=Send in batches, 2=Switch automatically");
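
/* Loading example (parameter values are illustrative, not recommendations):
 *
 *      insmod i2o_lan max_buckets_out=96 bucket_thresh=8 tx_batch_mode=2
 *
 * Parameters that are omitted keep the compiled-in I2O_LAN_* defaults.
 */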

#endif