qlge: Fix receive path to drop error frames
[linux-omap-dss2:linux.git] / drivers / net / ethernet / qlogic / qlge / qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow force of firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore causes exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialize
163  * process, but is also used in kernel thread API such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!.\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 return status;
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
260
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378                         status =
379                             ql_wait_reg_rdy(qdev,
380                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381                         if (status)
382                                 goto exit;
383                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
385                                    type);       /* type */
386                         ql_write32(qdev, MAC_ADDR_DATA, lower);
387                         status =
388                             ql_wait_reg_rdy(qdev,
389                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390                         if (status)
391                                 goto exit;
392                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
394                                    type);       /* type */
395                         ql_write32(qdev, MAC_ADDR_DATA, upper);
396                         status =
397                             ql_wait_reg_rdy(qdev,
398                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399                         if (status)
400                                 goto exit;
401                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
402                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
403                                    type);       /* type */
404                         /* This field should also include the queue id
405                            and possibly the function id.  Right now we hardcode
406                            the route field to NIC core.
407                          */
408                         cam_output = (CAM_OUT_ROUTE_NIC |
409                                       (qdev->
410                                        func << CAM_OUT_FUNC_SHIFT) |
411                                         (0 << CAM_OUT_CQ_ID_SHIFT));
412                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413                                 cam_output |= CAM_OUT_RV;
414                         /* route to NIC core */
415                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416                         break;
417                 }
418         case MAC_ADDR_TYPE_VLAN:
419                 {
420                         u32 enable_bit = *((u32 *) &addr[0]);
421                         /* For VLAN, the addr actually holds a bit that
422                          * either enables or disables the vlan id we are
423                          * addressing. It's either MAC_ADDR_E on or off.
424                          * That's bit-27 we're talking about.
425                          */
426                         status =
427                             ql_wait_reg_rdy(qdev,
428                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429                         if (status)
430                                 goto exit;
431                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
433                                    type |       /* type */
434                                    enable_bit); /* enable/disable */
435                         break;
436                 }
437         case MAC_ADDR_TYPE_MULTI_FLTR:
438         default:
439                 netif_crit(qdev, ifup, qdev->ndev,
440                            "Address type %d not yet supported.\n", type);
441                 status = -EPERM;
442         }
443 exit:
444         return status;
445 }
446
447 /* Set or clear MAC address in hardware. We sometimes
448  * have to clear it to prevent wrong frame routing
449  * especially in a bonding environment.
450  */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453         int status;
454         char zero_mac_addr[ETH_ALEN];
455         char *addr;
456
457         if (set) {
458                 addr = &qdev->current_mac_addr[0];
459                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460                              "Set Mac addr %pM\n", addr);
461         } else {
462                 memset(zero_mac_addr, 0, ETH_ALEN);
463                 addr = &zero_mac_addr[0];
464                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465                              "Clearing MAC address\n");
466         }
467         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468         if (status)
469                 return status;
470         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473         if (status)
474                 netif_err(qdev, ifup, qdev->ndev,
475                           "Failed to init mac address.\n");
476         return status;
477 }
478
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482         netif_carrier_on(qdev->ndev);
483         ql_set_mac_addr(qdev, 1);
484 }
485
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489         netif_carrier_off(qdev->ndev);
490         ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498         int status = 0;
499
500         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501         if (status)
502                 goto exit;
503
504         ql_write32(qdev, RT_IDX,
505                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507         if (status)
508                 goto exit;
509         *value = ql_read32(qdev, RT_DATA);
510 exit:
511         return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520                               int enable)
521 {
522         int status = -EINVAL; /* Return error if no mask match. */
523         u32 value = 0;
524
525         switch (mask) {
526         case RT_IDX_CAM_HIT:
527                 {
528                         value = RT_IDX_DST_CAM_Q |      /* dest */
529                             RT_IDX_TYPE_NICQ |  /* type */
530                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531                         break;
532                 }
533         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
534                 {
535                         value = RT_IDX_DST_DFLT_Q |     /* dest */
536                             RT_IDX_TYPE_NICQ |  /* type */
537                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538                         break;
539                 }
540         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
541                 {
542                         value = RT_IDX_DST_DFLT_Q |     /* dest */
543                             RT_IDX_TYPE_NICQ |  /* type */
544                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545                         break;
546                 }
547         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548                 {
549                         value = RT_IDX_DST_DFLT_Q | /* dest */
550                                 RT_IDX_TYPE_NICQ | /* type */
551                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
552                                 RT_IDX_IDX_SHIFT); /* index */
553                         break;
554                 }
555         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556                 {
557                         value = RT_IDX_DST_DFLT_Q | /* dest */
558                                 RT_IDX_TYPE_NICQ | /* type */
559                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560                                 RT_IDX_IDX_SHIFT); /* index */
561                         break;
562                 }
563         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
564                 {
565                         value = RT_IDX_DST_DFLT_Q |     /* dest */
566                             RT_IDX_TYPE_NICQ |  /* type */
567                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568                         break;
569                 }
570         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
571                 {
572                         value = RT_IDX_DST_DFLT_Q |     /* dest */
573                             RT_IDX_TYPE_NICQ |  /* type */
574                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575                         break;
576                 }
577         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
578                 {
579                         value = RT_IDX_DST_DFLT_Q |     /* dest */
580                             RT_IDX_TYPE_NICQ |  /* type */
581                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582                         break;
583                 }
584         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
585                 {
586                         value = RT_IDX_DST_RSS |        /* dest */
587                             RT_IDX_TYPE_NICQ |  /* type */
588                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589                         break;
590                 }
591         case 0:         /* Clear the E-bit on an entry. */
592                 {
593                         value = RT_IDX_DST_DFLT_Q |     /* dest */
594                             RT_IDX_TYPE_NICQ |  /* type */
595                             (index << RT_IDX_IDX_SHIFT);/* index */
596                         break;
597                 }
598         default:
599                 netif_err(qdev, ifup, qdev->ndev,
600                           "Mask type %d not yet supported.\n", mask);
601                 status = -EPERM;
602                 goto exit;
603         }
604
605         if (value) {
606                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607                 if (status)
608                         goto exit;
609                 value |= (enable ? RT_IDX_E : 0);
610                 ql_write32(qdev, RT_IDX, value);
611                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
612         }
613 exit:
614         return status;
615 }
616
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628  * Otherwise, we may have multiple outstanding workers and don't want to
629  * enable until the last one finishes. In this case, the irq_cnt gets
630  * incremented every time we queue a worker and decremented every time
631  * a worker finishes.  Once it hits zero we enable the interrupt.
632  */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635         u32 var = 0;
636         unsigned long hw_flags = 0;
637         struct intr_context *ctx = qdev->intr_context + intr;
638
639         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640                 /* Always enable if we're MSIX multi interrupts and
641                  * it's not the default (zeroeth) interrupt.
642                  */
643                 ql_write32(qdev, INTR_EN,
644                            ctx->intr_en_mask);
645                 var = ql_read32(qdev, STS);
646                 return var;
647         }
648
649         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650         if (atomic_dec_and_test(&ctx->irq_cnt)) {
651                 ql_write32(qdev, INTR_EN,
652                            ctx->intr_en_mask);
653                 var = ql_read32(qdev, STS);
654         }
655         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656         return var;
657 }
658
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661         u32 var = 0;
662         struct intr_context *ctx;
663
664         /* HW disables for us if we're MSIX multi interrupts and
665          * it's not the default (zeroeth) interrupt.
666          */
667         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668                 return 0;
669
670         ctx = qdev->intr_context + intr;
671         spin_lock(&qdev->hw_lock);
672         if (!atomic_read(&ctx->irq_cnt)) {
673                 ql_write32(qdev, INTR_EN,
674                 ctx->intr_dis_mask);
675                 var = ql_read32(qdev, STS);
676         }
677         atomic_inc(&ctx->irq_cnt);
678         spin_unlock(&qdev->hw_lock);
679         return var;
680 }
681
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684         int i;
685         for (i = 0; i < qdev->intr_count; i++) {
686                 /* The enable call does a atomic_dec_and_test
687                  * and enables only if the result is zero.
688                  * So we precharge it here.
689                  */
690                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691                         i == 0))
692                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693                 ql_enable_completion_interrupt(qdev, i);
694         }
695
696 }
697
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700         int status, i;
701         u16 csum = 0;
702         __le16 *flash = (__le16 *)&qdev->flash;
703
704         status = strncmp((char *)&qdev->flash, str, 4);
705         if (status) {
706                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707                 return  status;
708         }
709
710         for (i = 0; i < size; i++)
711                 csum += le16_to_cpu(*flash++);
712
713         if (csum)
714                 netif_err(qdev, ifup, qdev->ndev,
715                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716
717         return csum;
718 }
719
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722         int status = 0;
723         /* wait for reg to come ready */
724         status = ql_wait_reg_rdy(qdev,
725                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726         if (status)
727                 goto exit;
728         /* set up for reg read */
729         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730         /* wait for reg to come ready */
731         status = ql_wait_reg_rdy(qdev,
732                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733         if (status)
734                 goto exit;
735          /* This data is stored on flash as an array of
736          * __le32.  Since ql_read32() returns cpu endian
737          * we need to swap it back.
738          */
739         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741         return status;
742 }
743
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746         u32 i, size;
747         int status;
748         __le32 *p = (__le32 *)&qdev->flash;
749         u32 offset;
750         u8 mac_addr[6];
751
752         /* Get flash offset for function and adjust
753          * for dword access.
754          */
755         if (!qdev->port)
756                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757         else
758                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759
760         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761                 return -ETIMEDOUT;
762
763         size = sizeof(struct flash_params_8000) / sizeof(u32);
764         for (i = 0; i < size; i++, p++) {
765                 status = ql_read_flash_word(qdev, i+offset, p);
766                 if (status) {
767                         netif_err(qdev, ifup, qdev->ndev,
768                                   "Error reading flash.\n");
769                         goto exit;
770                 }
771         }
772
773         status = ql_validate_flash(qdev,
774                         sizeof(struct flash_params_8000) / sizeof(u16),
775                         "8000");
776         if (status) {
777                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778                 status = -EINVAL;
779                 goto exit;
780         }
781
782         /* Extract either manufacturer or BOFM modified
783          * MAC address.
784          */
785         if (qdev->flash.flash_params_8000.data_type1 == 2)
786                 memcpy(mac_addr,
787                         qdev->flash.flash_params_8000.mac_addr1,
788                         qdev->ndev->addr_len);
789         else
790                 memcpy(mac_addr,
791                         qdev->flash.flash_params_8000.mac_addr,
792                         qdev->ndev->addr_len);
793
794         if (!is_valid_ether_addr(mac_addr)) {
795                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796                 status = -EINVAL;
797                 goto exit;
798         }
799
800         memcpy(qdev->ndev->dev_addr,
801                 mac_addr,
802                 qdev->ndev->addr_len);
803
804 exit:
805         ql_sem_unlock(qdev, SEM_FLASH_MASK);
806         return status;
807 }
808
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811         int i;
812         int status;
813         __le32 *p = (__le32 *)&qdev->flash;
814         u32 offset = 0;
815         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816
817         /* Second function's parameters follow the first
818          * function's.
819          */
820         if (qdev->port)
821                 offset = size;
822
823         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824                 return -ETIMEDOUT;
825
826         for (i = 0; i < size; i++, p++) {
827                 status = ql_read_flash_word(qdev, i+offset, p);
828                 if (status) {
829                         netif_err(qdev, ifup, qdev->ndev,
830                                   "Error reading flash.\n");
831                         goto exit;
832                 }
833
834         }
835
836         status = ql_validate_flash(qdev,
837                         sizeof(struct flash_params_8012) / sizeof(u16),
838                         "8012");
839         if (status) {
840                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841                 status = -EINVAL;
842                 goto exit;
843         }
844
845         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846                 status = -EINVAL;
847                 goto exit;
848         }
849
850         memcpy(qdev->ndev->dev_addr,
851                 qdev->flash.flash_params_8012.mac_addr,
852                 qdev->ndev->addr_len);
853
854 exit:
855         ql_sem_unlock(qdev, SEM_FLASH_MASK);
856         return status;
857 }
858
859 /* xgmac register are located behind the xgmac_addr and xgmac_data
860  * register pair.  Each read/write requires us to wait for the ready
861  * bit before reading/writing the data.
862  */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865         int status;
866         /* wait for reg to come ready */
867         status = ql_wait_reg_rdy(qdev,
868                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869         if (status)
870                 return status;
871         /* write the data to the data reg */
872         ql_write32(qdev, XGMAC_DATA, data);
873         /* trigger the write */
874         ql_write32(qdev, XGMAC_ADDR, reg);
875         return status;
876 }
877
878 /* xgmac register are located behind the xgmac_addr and xgmac_data
879  * register pair.  Each read/write requires us to wait for the ready
880  * bit before reading/writing the data.
881  */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884         int status = 0;
885         /* wait for reg to come ready */
886         status = ql_wait_reg_rdy(qdev,
887                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888         if (status)
889                 goto exit;
890         /* set up for reg read */
891         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892         /* wait for reg to come ready */
893         status = ql_wait_reg_rdy(qdev,
894                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895         if (status)
896                 goto exit;
897         /* get the data */
898         *data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900         return status;
901 }
902
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906         int status = 0;
907         u32 hi = 0;
908         u32 lo = 0;
909
910         status = ql_read_xgmac_reg(qdev, reg, &lo);
911         if (status)
912                 goto exit;
913
914         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915         if (status)
916                 goto exit;
917
918         *data = (u64) lo | ((u64) hi << 32);
919
920 exit:
921         return status;
922 }
923
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926         int status;
927         /*
928          * Get MPI firmware version for driver banner
929          * and ethool info.
930          */
931         status = ql_mb_about_fw(qdev);
932         if (status)
933                 goto exit;
934         status = ql_mb_get_fw_state(qdev);
935         if (status)
936                 goto exit;
937         /* Wake up a worker to get/set the TX/RX frame sizes. */
938         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940         return status;
941 }
942
943 /* Take the MAC Core out of reset.
944  * Enable statistics counting.
945  * Take the transmitter/receiver out of reset.
946  * This functionality may be done in the MPI firmware at a
947  * later date.
948  */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951         int status = 0;
952         u32 data;
953
954         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955                 /* Another function has the semaphore, so
956                  * wait for the port init bit to come ready.
957                  */
958                 netif_info(qdev, link, qdev->ndev,
959                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961                 if (status) {
962                         netif_crit(qdev, link, qdev->ndev,
963                                    "Port initialize timed out.\n");
964                 }
965                 return status;
966         }
967
968         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969         /* Set the core reset. */
970         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971         if (status)
972                 goto end;
973         data |= GLOBAL_CFG_RESET;
974         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975         if (status)
976                 goto end;
977
978         /* Clear the core reset and turn on jumbo for receiver. */
979         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
980         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
981         data |= GLOBAL_CFG_TX_STAT_EN;
982         data |= GLOBAL_CFG_RX_STAT_EN;
983         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984         if (status)
985                 goto end;
986
987         /* Enable transmitter, and clear it's reset. */
988         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989         if (status)
990                 goto end;
991         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
992         data |= TX_CFG_EN;      /* Enable the transmitter. */
993         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994         if (status)
995                 goto end;
996
997         /* Enable receiver and clear it's reset. */
998         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999         if (status)
1000                 goto end;
1001         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1002         data |= RX_CFG_EN;      /* Enable the receiver. */
1003         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004         if (status)
1005                 goto end;
1006
1007         /* Turn on jumbo. */
1008         status =
1009             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010         if (status)
1011                 goto end;
1012         status =
1013             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014         if (status)
1015                 goto end;
1016
1017         /* Signal to the world that the port is enabled.        */
1018         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021         return status;
1022 }
1023
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026         return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033         rx_ring->lbq_curr_idx++;
1034         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035                 rx_ring->lbq_curr_idx = 0;
1036         rx_ring->lbq_free_cnt++;
1037         return lbq_desc;
1038 }
1039
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041                 struct rx_ring *rx_ring)
1042 {
1043         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044
1045         pci_dma_sync_single_for_cpu(qdev->pdev,
1046                                         dma_unmap_addr(lbq_desc, mapaddr),
1047                                     rx_ring->lbq_buf_size,
1048                                         PCI_DMA_FROMDEVICE);
1049
1050         /* If it's the last chunk of our master page then
1051          * we unmap it.
1052          */
1053         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054                                         == ql_lbq_block_size(qdev))
1055                 pci_unmap_page(qdev->pdev,
1056                                 lbq_desc->p.pg_chunk.map,
1057                                 ql_lbq_block_size(qdev),
1058                                 PCI_DMA_FROMDEVICE);
1059         return lbq_desc;
1060 }
1061
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066         rx_ring->sbq_curr_idx++;
1067         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068                 rx_ring->sbq_curr_idx = 0;
1069         rx_ring->sbq_free_cnt++;
1070         return sbq_desc;
1071 }
1072
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076         rx_ring->cnsmr_idx++;
1077         rx_ring->curr_entry++;
1078         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079                 rx_ring->cnsmr_idx = 0;
1080                 rx_ring->curr_entry = rx_ring->cq_base;
1081         }
1082 }
1083
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090                                                 struct bq_desc *lbq_desc)
1091 {
1092         if (!rx_ring->pg_chunk.page) {
1093                 u64 map;
1094                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095                                                 GFP_ATOMIC,
1096                                                 qdev->lbq_buf_order);
1097                 if (unlikely(!rx_ring->pg_chunk.page)) {
1098                         netif_err(qdev, drv, qdev->ndev,
1099                                   "page allocation failed.\n");
1100                         return -ENOMEM;
1101                 }
1102                 rx_ring->pg_chunk.offset = 0;
1103                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104                                         0, ql_lbq_block_size(qdev),
1105                                         PCI_DMA_FROMDEVICE);
1106                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107                         __free_pages(rx_ring->pg_chunk.page,
1108                                         qdev->lbq_buf_order);
1109                         netif_err(qdev, drv, qdev->ndev,
1110                                   "PCI mapping failed.\n");
1111                         return -ENOMEM;
1112                 }
1113                 rx_ring->pg_chunk.map = map;
1114                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115         }
1116
1117         /* Copy the current master pg_chunk info
1118          * to the current descriptor.
1119          */
1120         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121
1122         /* Adjust the master page chunk for next
1123          * buffer get.
1124          */
1125         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127                 rx_ring->pg_chunk.page = NULL;
1128                 lbq_desc->p.pg_chunk.last_flag = 1;
1129         } else {
1130                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131                 get_page(rx_ring->pg_chunk.page);
1132                 lbq_desc->p.pg_chunk.last_flag = 0;
1133         }
1134         return 0;
1135 }
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139         u32 clean_idx = rx_ring->lbq_clean_idx;
1140         u32 start_idx = clean_idx;
1141         struct bq_desc *lbq_desc;
1142         u64 map;
1143         int i;
1144
1145         while (rx_ring->lbq_free_cnt > 32) {
1146                 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148                                      "lbq: try cleaning clean_idx = %d.\n",
1149                                      clean_idx);
1150                         lbq_desc = &rx_ring->lbq[clean_idx];
1151                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152                                 rx_ring->lbq_clean_idx = clean_idx;
1153                                 netif_err(qdev, ifup, qdev->ndev,
1154                                                 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155                                                 i, clean_idx);
1156                                 return;
1157                         }
1158
1159                         map = lbq_desc->p.pg_chunk.map +
1160                                 lbq_desc->p.pg_chunk.offset;
1161                                 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162                         dma_unmap_len_set(lbq_desc, maplen,
1163                                         rx_ring->lbq_buf_size);
1164                                 *lbq_desc->addr = cpu_to_le64(map);
1165
1166                         pci_dma_sync_single_for_device(qdev->pdev, map,
1167                                                 rx_ring->lbq_buf_size,
1168                                                 PCI_DMA_FROMDEVICE);
1169                         clean_idx++;
1170                         if (clean_idx == rx_ring->lbq_len)
1171                                 clean_idx = 0;
1172                 }
1173
1174                 rx_ring->lbq_clean_idx = clean_idx;
1175                 rx_ring->lbq_prod_idx += 16;
1176                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177                         rx_ring->lbq_prod_idx = 0;
1178                 rx_ring->lbq_free_cnt -= 16;
1179         }
1180
1181         if (start_idx != clean_idx) {
1182                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                              "lbq: updating prod idx = %d.\n",
1184                              rx_ring->lbq_prod_idx);
1185                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186                                 rx_ring->lbq_prod_idx_db_reg);
1187         }
1188 }
1189
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193         u32 clean_idx = rx_ring->sbq_clean_idx;
1194         u32 start_idx = clean_idx;
1195         struct bq_desc *sbq_desc;
1196         u64 map;
1197         int i;
1198
1199         while (rx_ring->sbq_free_cnt > 16) {
1200                 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201                         sbq_desc = &rx_ring->sbq[clean_idx];
1202                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203                                      "sbq: try cleaning clean_idx = %d.\n",
1204                                      clean_idx);
1205                         if (sbq_desc->p.skb == NULL) {
1206                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1207                                              qdev->ndev,
1208                                              "sbq: getting new skb for index %d.\n",
1209                                              sbq_desc->index);
1210                                 sbq_desc->p.skb =
1211                                     netdev_alloc_skb(qdev->ndev,
1212                                                      SMALL_BUFFER_SIZE);
1213                                 if (sbq_desc->p.skb == NULL) {
1214                                         netif_err(qdev, probe, qdev->ndev,
1215                                                   "Couldn't get an skb.\n");
1216                                         rx_ring->sbq_clean_idx = clean_idx;
1217                                         return;
1218                                 }
1219                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220                                 map = pci_map_single(qdev->pdev,
1221                                                      sbq_desc->p.skb->data,
1222                                                      rx_ring->sbq_buf_size,
1223                                                      PCI_DMA_FROMDEVICE);
1224                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225                                         netif_err(qdev, ifup, qdev->ndev,
1226                                                   "PCI mapping failed.\n");
1227                                         rx_ring->sbq_clean_idx = clean_idx;
1228                                         dev_kfree_skb_any(sbq_desc->p.skb);
1229                                         sbq_desc->p.skb = NULL;
1230                                         return;
1231                                 }
1232                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233                                 dma_unmap_len_set(sbq_desc, maplen,
1234                                                   rx_ring->sbq_buf_size);
1235                                 *sbq_desc->addr = cpu_to_le64(map);
1236                         }
1237
1238                         clean_idx++;
1239                         if (clean_idx == rx_ring->sbq_len)
1240                                 clean_idx = 0;
1241                 }
1242                 rx_ring->sbq_clean_idx = clean_idx;
1243                 rx_ring->sbq_prod_idx += 16;
1244                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245                         rx_ring->sbq_prod_idx = 0;
1246                 rx_ring->sbq_free_cnt -= 16;
1247         }
1248
1249         if (start_idx != clean_idx) {
1250                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251                              "sbq: updating prod idx = %d.\n",
1252                              rx_ring->sbq_prod_idx);
1253                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254                                 rx_ring->sbq_prod_idx_db_reg);
1255         }
1256 }
1257
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259                                     struct rx_ring *rx_ring)
1260 {
1261         ql_update_sbq(qdev, rx_ring);
1262         ql_update_lbq(qdev, rx_ring);
1263 }
1264
1265 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266  * fails at some stage, or from the interrupt when a tx completes.
1267  */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269                           struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271         int i;
1272         for (i = 0; i < mapped; i++) {
1273                 if (i == 0 || (i == 7 && mapped > 7)) {
1274                         /*
1275                          * Unmap the skb->data area, or the
1276                          * external sglist (AKA the Outbound
1277                          * Address List (OAL)).
1278                          * If its the zeroeth element, then it's
1279                          * the skb->data area.  If it's the 7th
1280                          * element and there is more than 6 frags,
1281                          * then its an OAL.
1282                          */
1283                         if (i == 7) {
1284                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1285                                              qdev->ndev,
1286                                              "unmapping OAL area.\n");
1287                         }
1288                         pci_unmap_single(qdev->pdev,
1289                                          dma_unmap_addr(&tx_ring_desc->map[i],
1290                                                         mapaddr),
1291                                          dma_unmap_len(&tx_ring_desc->map[i],
1292                                                        maplen),
1293                                          PCI_DMA_TODEVICE);
1294                 } else {
1295                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296                                      "unmapping frag %d.\n", i);
1297                         pci_unmap_page(qdev->pdev,
1298                                        dma_unmap_addr(&tx_ring_desc->map[i],
1299                                                       mapaddr),
1300                                        dma_unmap_len(&tx_ring_desc->map[i],
1301                                                      maplen), PCI_DMA_TODEVICE);
1302                 }
1303         }
1304
1305 }
1306
1307 /* Map the buffers for this transmit.  This will return
1308  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309  */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311                        struct ob_mac_iocb_req *mac_iocb_ptr,
1312                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314         int len = skb_headlen(skb);
1315         dma_addr_t map;
1316         int frag_idx, err, map_idx = 0;
1317         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318         int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320         if (frag_cnt) {
1321                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322                              "frag_cnt = %d.\n", frag_cnt);
1323         }
1324         /*
1325          * Map the skb buffer first.
1326          */
1327         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329         err = pci_dma_mapping_error(qdev->pdev, map);
1330         if (err) {
1331                 netif_err(qdev, tx_queued, qdev->ndev,
1332                           "PCI mapping failed with error: %d\n", err);
1333
1334                 return NETDEV_TX_BUSY;
1335         }
1336
1337         tbd->len = cpu_to_le32(len);
1338         tbd->addr = cpu_to_le64(map);
1339         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341         map_idx++;
1342
1343         /*
1344          * This loop fills the remainder of the 8 address descriptors
1345          * in the IOCB.  If there are more than 7 fragments, then the
1346          * eighth address desc will point to an external list (OAL).
1347          * When this happens, the remainder of the frags will be stored
1348          * in this list.
1349          */
1350         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352                 tbd++;
1353                 if (frag_idx == 6 && frag_cnt > 7) {
1354                         /* Let's tack on an sglist.
1355                          * Our control block will now
1356                          * look like this:
1357                          * iocb->seg[0] = skb->data
1358                          * iocb->seg[1] = frag[0]
1359                          * iocb->seg[2] = frag[1]
1360                          * iocb->seg[3] = frag[2]
1361                          * iocb->seg[4] = frag[3]
1362                          * iocb->seg[5] = frag[4]
1363                          * iocb->seg[6] = frag[5]
1364                          * iocb->seg[7] = ptr to OAL (external sglist)
1365                          * oal->seg[0] = frag[6]
1366                          * oal->seg[1] = frag[7]
1367                          * oal->seg[2] = frag[8]
1368                          * oal->seg[3] = frag[9]
1369                          * oal->seg[4] = frag[10]
1370                          *      etc...
1371                          */
1372                         /* Tack on the OAL in the eighth segment of IOCB. */
1373                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374                                              sizeof(struct oal),
1375                                              PCI_DMA_TODEVICE);
1376                         err = pci_dma_mapping_error(qdev->pdev, map);
1377                         if (err) {
1378                                 netif_err(qdev, tx_queued, qdev->ndev,
1379                                           "PCI mapping outbound address list with error: %d\n",
1380                                           err);
1381                                 goto map_error;
1382                         }
1383
1384                         tbd->addr = cpu_to_le64(map);
1385                         /*
1386                          * The length is the number of fragments
1387                          * that remain to be mapped times the length
1388                          * of our sglist (OAL).
1389                          */
1390                         tbd->len =
1391                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1392                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1393                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394                                            map);
1395                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396                                           sizeof(struct oal));
1397                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398                         map_idx++;
1399                 }
1400
1401                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402                                        DMA_TO_DEVICE);
1403
1404                 err = dma_mapping_error(&qdev->pdev->dev, map);
1405                 if (err) {
1406                         netif_err(qdev, tx_queued, qdev->ndev,
1407                                   "PCI mapping frags failed with error: %d.\n",
1408                                   err);
1409                         goto map_error;
1410                 }
1411
1412                 tbd->addr = cpu_to_le64(map);
1413                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416                                   skb_frag_size(frag));
1417
1418         }
1419         /* Save the number of segments we've mapped. */
1420         tx_ring_desc->map_cnt = map_idx;
1421         /* Terminate the last segment. */
1422         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423         return NETDEV_TX_OK;
1424
1425 map_error:
1426         /*
1427          * If the first frag mapping failed, then i will be zero.
1428          * This causes the unmap of the skb->data area.  Otherwise
1429          * we pass in the number of frags that mapped successfully
1430          * so they can be umapped.
1431          */
1432         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433         return NETDEV_TX_BUSY;
1434 }
1435
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438                                  struct rx_ring *rx_ring)
1439 {
1440         struct nic_stats *stats = &qdev->nic_stats;
1441
1442         stats->rx_err_count++;
1443         rx_ring->rx_errors++;
1444
1445         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447                 stats->rx_code_err++;
1448                 break;
1449         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450                 stats->rx_oversize_err++;
1451                 break;
1452         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453                 stats->rx_undersize_err++;
1454                 break;
1455         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456                 stats->rx_preamble_err++;
1457                 break;
1458         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459                 stats->rx_frame_len_err++;
1460                 break;
1461         case IB_MAC_IOCB_RSP_ERR_CRC:
1462                 stats->rx_crc_err++;
1463         default:
1464                 break;
1465         }
1466 }
1467
1468 /* Process an inbound completion from an rx ring. */
1469 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1470                                         struct rx_ring *rx_ring,
1471                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1472                                         u32 length,
1473                                         u16 vlan_id)
1474 {
1475         struct sk_buff *skb;
1476         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1477         struct napi_struct *napi = &rx_ring->napi;
1478
1479         /* Frame error, so drop the packet. */
1480         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1481                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1482                 put_page(lbq_desc->p.pg_chunk.page);
1483                 return;
1484         }
1485         napi->dev = qdev->ndev;
1486
1487         skb = napi_get_frags(napi);
1488         if (!skb) {
1489                 netif_err(qdev, drv, qdev->ndev,
1490                           "Couldn't get an skb, exiting.\n");
1491                 rx_ring->rx_dropped++;
1492                 put_page(lbq_desc->p.pg_chunk.page);
1493                 return;
1494         }
1495         prefetch(lbq_desc->p.pg_chunk.va);
1496         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1497                              lbq_desc->p.pg_chunk.page,
1498                              lbq_desc->p.pg_chunk.offset,
1499                              length);
1500
1501         skb->len += length;
1502         skb->data_len += length;
1503         skb->truesize += length;
1504         skb_shinfo(skb)->nr_frags++;
1505
1506         rx_ring->rx_packets++;
1507         rx_ring->rx_bytes += length;
1508         skb->ip_summed = CHECKSUM_UNNECESSARY;
1509         skb_record_rx_queue(skb, rx_ring->cq_id);
1510         if (vlan_id != 0xffff)
1511                 __vlan_hwaccel_put_tag(skb, vlan_id);
1512         napi_gro_frags(napi);
1513 }
1514
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517                                         struct rx_ring *rx_ring,
1518                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1519                                         u32 length,
1520                                         u16 vlan_id)
1521 {
1522         struct net_device *ndev = qdev->ndev;
1523         struct sk_buff *skb = NULL;
1524         void *addr;
1525         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526         struct napi_struct *napi = &rx_ring->napi;
1527
1528         skb = netdev_alloc_skb(ndev, length);
1529         if (!skb) {
1530                 netif_err(qdev, drv, qdev->ndev,
1531                           "Couldn't get an skb, need to unwind!.\n");
1532                 rx_ring->rx_dropped++;
1533                 put_page(lbq_desc->p.pg_chunk.page);
1534                 return;
1535         }
1536
1537         addr = lbq_desc->p.pg_chunk.va;
1538         prefetch(addr);
1539
1540         /* Frame error, so drop the packet. */
1541         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1542                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1543                 goto err_out;
1544         }
1545
1546         /* The max framesize filter on this chip is set higher than
1547          * MTU since FCoE uses 2k frames.
1548          */
1549         if (skb->len > ndev->mtu + ETH_HLEN) {
1550                 netif_err(qdev, drv, qdev->ndev,
1551                           "Segment too small, dropping.\n");
1552                 rx_ring->rx_dropped++;
1553                 goto err_out;
1554         }
1555         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1556         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1557                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1558                      length);
1559         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1560                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1561                                 length-ETH_HLEN);
1562         skb->len += length-ETH_HLEN;
1563         skb->data_len += length-ETH_HLEN;
1564         skb->truesize += length-ETH_HLEN;
1565
1566         rx_ring->rx_packets++;
1567         rx_ring->rx_bytes += skb->len;
1568         skb->protocol = eth_type_trans(skb, ndev);
1569         skb_checksum_none_assert(skb);
1570
1571         if ((ndev->features & NETIF_F_RXCSUM) &&
1572                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1573                 /* TCP frame. */
1574                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1575                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1576                                      "TCP checksum done!\n");
1577                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1578                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1579                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1580                         /* Unfragmented ipv4 UDP frame. */
1581                         struct iphdr *iph =
1582                                 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1583                         if (!(iph->frag_off &
1584                                 htons(IP_MF|IP_OFFSET))) {
1585                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1587                                              qdev->ndev,
1588                                              "UDP checksum done!\n");
1589                         }
1590                 }
1591         }
1592
1593         skb_record_rx_queue(skb, rx_ring->cq_id);
1594         if (vlan_id != 0xffff)
1595                 __vlan_hwaccel_put_tag(skb, vlan_id);
1596         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1597                 napi_gro_receive(napi, skb);
1598         else
1599                 netif_receive_skb(skb);
1600         return;
1601 err_out:
1602         dev_kfree_skb_any(skb);
1603         put_page(lbq_desc->p.pg_chunk.page);
1604 }
1605
1606 /* Process an inbound completion from an rx ring. */
1607 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1608                                         struct rx_ring *rx_ring,
1609                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1610                                         u32 length,
1611                                         u16 vlan_id)
1612 {
1613         struct net_device *ndev = qdev->ndev;
1614         struct sk_buff *skb = NULL;
1615         struct sk_buff *new_skb = NULL;
1616         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1617
1618         skb = sbq_desc->p.skb;
1619         /* Allocate new_skb and copy */
1620         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1621         if (new_skb == NULL) {
1622                 netif_err(qdev, probe, qdev->ndev,
1623                           "No skb available, drop the packet.\n");
1624                 rx_ring->rx_dropped++;
1625                 return;
1626         }
1627         skb_reserve(new_skb, NET_IP_ALIGN);
1628         memcpy(skb_put(new_skb, length), skb->data, length);
1629         skb = new_skb;
1630
1631         /* Frame error, so drop the packet. */
1632         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1633                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1634                 dev_kfree_skb_any(skb);
1635                 return;
1636         }
1637
1638         /* loopback self test for ethtool */
1639         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1640                 ql_check_lb_frame(qdev, skb);
1641                 dev_kfree_skb_any(skb);
1642                 return;
1643         }
1644
1645         /* The max framesize filter on this chip is set higher than
1646          * MTU since FCoE uses 2k frames.
1647          */
1648         if (skb->len > ndev->mtu + ETH_HLEN) {
1649                 dev_kfree_skb_any(skb);
1650                 rx_ring->rx_dropped++;
1651                 return;
1652         }
1653
1654         prefetch(skb->data);
1655         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657                              "%s Multicast.\n",
1658                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1664         }
1665         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "Promiscuous Packet.\n");
1668
1669         rx_ring->rx_packets++;
1670         rx_ring->rx_bytes += skb->len;
1671         skb->protocol = eth_type_trans(skb, ndev);
1672         skb_checksum_none_assert(skb);
1673
1674         /* If rx checksum is on, and there are no
1675          * csum or frame errors.
1676          */
1677         if ((ndev->features & NETIF_F_RXCSUM) &&
1678                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679                 /* TCP frame. */
1680                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682                                      "TCP checksum done!\n");
1683                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1684                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686                         /* Unfragmented ipv4 UDP frame. */
1687                         struct iphdr *iph = (struct iphdr *) skb->data;
1688                         if (!(iph->frag_off &
1689                                 htons(IP_MF|IP_OFFSET))) {
1690                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1692                                              qdev->ndev,
1693                                              "UDP checksum done!\n");
1694                         }
1695                 }
1696         }
1697
1698         skb_record_rx_queue(skb, rx_ring->cq_id);
1699         if (vlan_id != 0xffff)
1700                 __vlan_hwaccel_put_tag(skb, vlan_id);
1701         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702                 napi_gro_receive(&rx_ring->napi, skb);
1703         else
1704                 netif_receive_skb(skb);
1705 }
1706
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1708 {
1709         void *temp_addr = skb->data;
1710
1711         /* Undo the skb_reserve(skb,32) we did before
1712          * giving to hardware, and realign data on
1713          * a 2-byte boundary.
1714          */
1715         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717         skb_copy_to_linear_data(skb, temp_addr,
1718                 (unsigned int)len);
1719 }
1720
1721 /*
1722  * This function builds an skb for the given inbound
1723  * completion.  It will be rewritten for readability in the near
1724  * future, but for not it works well.
1725  */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727                                        struct rx_ring *rx_ring,
1728                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1729 {
1730         struct bq_desc *lbq_desc;
1731         struct bq_desc *sbq_desc;
1732         struct sk_buff *skb = NULL;
1733         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736         /*
1737          * Handle the header buffer if present.
1738          */
1739         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742                              "Header of %d bytes in small buffer.\n", hdr_len);
1743                 /*
1744                  * Headers fit nicely into a small buffer.
1745                  */
1746                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747                 pci_unmap_single(qdev->pdev,
1748                                 dma_unmap_addr(sbq_desc, mapaddr),
1749                                 dma_unmap_len(sbq_desc, maplen),
1750                                 PCI_DMA_FROMDEVICE);
1751                 skb = sbq_desc->p.skb;
1752                 ql_realign_skb(skb, hdr_len);
1753                 skb_put(skb, hdr_len);
1754                 sbq_desc->p.skb = NULL;
1755         }
1756
1757         /*
1758          * Handle the data buffer(s).
1759          */
1760         if (unlikely(!length)) {        /* Is there data too? */
1761                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762                              "No Data buffer in this packet.\n");
1763                 return skb;
1764         }
1765
1766         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769                                      "Headers in small, data of %d bytes in small, combine them.\n",
1770                                      length);
1771                         /*
1772                          * Data is less than small buffer size so it's
1773                          * stuffed in a small buffer.
1774                          * For this case we append the data
1775                          * from the "data" small buffer to the "header" small
1776                          * buffer.
1777                          */
1778                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1779                         pci_dma_sync_single_for_cpu(qdev->pdev,
1780                                                     dma_unmap_addr
1781                                                     (sbq_desc, mapaddr),
1782                                                     dma_unmap_len
1783                                                     (sbq_desc, maplen),
1784                                                     PCI_DMA_FROMDEVICE);
1785                         memcpy(skb_put(skb, length),
1786                                sbq_desc->p.skb->data, length);
1787                         pci_dma_sync_single_for_device(qdev->pdev,
1788                                                        dma_unmap_addr
1789                                                        (sbq_desc,
1790                                                         mapaddr),
1791                                                        dma_unmap_len
1792                                                        (sbq_desc,
1793                                                         maplen),
1794                                                        PCI_DMA_FROMDEVICE);
1795                 } else {
1796                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                                      "%d bytes in a single small buffer.\n",
1798                                      length);
1799                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1800                         skb = sbq_desc->p.skb;
1801                         ql_realign_skb(skb, length);
1802                         skb_put(skb, length);
1803                         pci_unmap_single(qdev->pdev,
1804                                          dma_unmap_addr(sbq_desc,
1805                                                         mapaddr),
1806                                          dma_unmap_len(sbq_desc,
1807                                                        maplen),
1808                                          PCI_DMA_FROMDEVICE);
1809                         sbq_desc->p.skb = NULL;
1810                 }
1811         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814                                      "Header in small, %d bytes in large. Chain large to small!\n",
1815                                      length);
1816                         /*
1817                          * The data is in a single large buffer.  We
1818                          * chain it to the header buffer's skb and let
1819                          * it rip.
1820                          */
1821                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1824                                      lbq_desc->p.pg_chunk.offset, length);
1825                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826                                                 lbq_desc->p.pg_chunk.offset,
1827                                                 length);
1828                         skb->len += length;
1829                         skb->data_len += length;
1830                         skb->truesize += length;
1831                 } else {
1832                         /*
1833                          * The headers and data are in a single large buffer. We
1834                          * copy it to a new skb and let it go. This can happen with
1835                          * jumbo mtu on a non-TCP/UDP frame.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         skb = netdev_alloc_skb(qdev->ndev, length);
1839                         if (skb == NULL) {
1840                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841                                              "No skb available, drop the packet.\n");
1842                                 return NULL;
1843                         }
1844                         pci_unmap_page(qdev->pdev,
1845                                        dma_unmap_addr(lbq_desc,
1846                                                       mapaddr),
1847                                        dma_unmap_len(lbq_desc, maplen),
1848                                        PCI_DMA_FROMDEVICE);
1849                         skb_reserve(skb, NET_IP_ALIGN);
1850                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852                                      length);
1853                         skb_fill_page_desc(skb, 0,
1854                                                 lbq_desc->p.pg_chunk.page,
1855                                                 lbq_desc->p.pg_chunk.offset,
1856                                                 length);
1857                         skb->len += length;
1858                         skb->data_len += length;
1859                         skb->truesize += length;
1860                         length -= length;
1861                         __pskb_pull_tail(skb,
1862                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863                                 VLAN_ETH_HLEN : ETH_HLEN);
1864                 }
1865         } else {
1866                 /*
1867                  * The data is in a chain of large buffers
1868                  * pointed to by a small buffer.  We loop
1869                  * thru and chain them to the our small header
1870                  * buffer's skb.
1871                  * frags:  There are 18 max frags and our small
1872                  *         buffer will hold 32 of them. The thing is,
1873                  *         we'll use 3 max for our 9000 byte jumbo
1874                  *         frames.  If the MTU goes up we could
1875                  *          eventually be in trouble.
1876                  */
1877                 int size, i = 0;
1878                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879                 pci_unmap_single(qdev->pdev,
1880                                  dma_unmap_addr(sbq_desc, mapaddr),
1881                                  dma_unmap_len(sbq_desc, maplen),
1882                                  PCI_DMA_FROMDEVICE);
1883                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884                         /*
1885                          * This is an non TCP/UDP IP frame, so
1886                          * the headers aren't split into a small
1887                          * buffer.  We have to use the small buffer
1888                          * that contains our sg list as our skb to
1889                          * send upstairs. Copy the sg list here to
1890                          * a local buffer and use it to find the
1891                          * pages to chain.
1892                          */
1893                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894                                      "%d bytes of headers & data in chain of large.\n",
1895                                      length);
1896                         skb = sbq_desc->p.skb;
1897                         sbq_desc->p.skb = NULL;
1898                         skb_reserve(skb, NET_IP_ALIGN);
1899                 }
1900                 while (length > 0) {
1901                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902                         size = (length < rx_ring->lbq_buf_size) ? length :
1903                                 rx_ring->lbq_buf_size;
1904
1905                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906                                      "Adding page %d to skb for %d bytes.\n",
1907                                      i, size);
1908                         skb_fill_page_desc(skb, i,
1909                                                 lbq_desc->p.pg_chunk.page,
1910                                                 lbq_desc->p.pg_chunk.offset,
1911                                                 size);
1912                         skb->len += size;
1913                         skb->data_len += size;
1914                         skb->truesize += size;
1915                         length -= size;
1916                         i++;
1917                 }
1918                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919                                 VLAN_ETH_HLEN : ETH_HLEN);
1920         }
1921         return skb;
1922 }
1923
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926                                    struct rx_ring *rx_ring,
1927                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1928                                    u16 vlan_id)
1929 {
1930         struct net_device *ndev = qdev->ndev;
1931         struct sk_buff *skb = NULL;
1932
1933         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936         if (unlikely(!skb)) {
1937                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                              "No skb available, drop packet.\n");
1939                 rx_ring->rx_dropped++;
1940                 return;
1941         }
1942
1943         /* Frame error, so drop the packet. */
1944         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1946                 dev_kfree_skb_any(skb);
1947                 return;
1948         }
1949
1950         /* The max framesize filter on this chip is set higher than
1951          * MTU since FCoE uses 2k frames.
1952          */
1953         if (skb->len > ndev->mtu + ETH_HLEN) {
1954                 dev_kfree_skb_any(skb);
1955                 rx_ring->rx_dropped++;
1956                 return;
1957         }
1958
1959         /* loopback self test for ethtool */
1960         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1961                 ql_check_lb_frame(qdev, skb);
1962                 dev_kfree_skb_any(skb);
1963                 return;
1964         }
1965
1966         prefetch(skb->data);
1967         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1968                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1975                 rx_ring->rx_multicast++;
1976         }
1977         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1978                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979                              "Promiscuous Packet.\n");
1980         }
1981
1982         skb->protocol = eth_type_trans(skb, ndev);
1983         skb_checksum_none_assert(skb);
1984
1985         /* If rx checksum is on, and there are no
1986          * csum or frame errors.
1987          */
1988         if ((ndev->features & NETIF_F_RXCSUM) &&
1989                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1990                 /* TCP frame. */
1991                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1992                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993                                      "TCP checksum done!\n");
1994                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1995                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997                 /* Unfragmented ipv4 UDP frame. */
1998                         struct iphdr *iph = (struct iphdr *) skb->data;
1999                         if (!(iph->frag_off &
2000                                 htons(IP_MF|IP_OFFSET))) {
2001                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2002                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003                                              "TCP checksum done!\n");
2004                         }
2005                 }
2006         }
2007
2008         rx_ring->rx_packets++;
2009         rx_ring->rx_bytes += skb->len;
2010         skb_record_rx_queue(skb, rx_ring->cq_id);
2011         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2012                 __vlan_hwaccel_put_tag(skb, vlan_id);
2013         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2014                 napi_gro_receive(&rx_ring->napi, skb);
2015         else
2016                 netif_receive_skb(skb);
2017 }
2018
2019 /* Process an inbound completion from an rx ring. */
2020 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2021                                         struct rx_ring *rx_ring,
2022                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2023 {
2024         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2025         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2026                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2027                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2028
2029         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2030
2031         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2032                 /* The data and headers are split into
2033                  * separate buffers.
2034                  */
2035                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2036                                                 vlan_id);
2037         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2038                 /* The data fit in a single small buffer.
2039                  * Allocate a new skb, copy the data and
2040                  * return the buffer to the free pool.
2041                  */
2042                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2043                                                 length, vlan_id);
2044         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2045                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2046                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2047                 /* TCP packet in a page chunk that's been checksummed.
2048                  * Tack it on to our GRO skb and let it go.
2049                  */
2050                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2051                                                 length, vlan_id);
2052         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2053                 /* Non-TCP packet in a page chunk. Allocate an
2054                  * skb, tack it on frags, and send it up.
2055                  */
2056                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2057                                                 length, vlan_id);
2058         } else {
2059                 /* Non-TCP/UDP large frames that span multiple buffers
2060                  * can be processed corrrectly by the split frame logic.
2061                  */
2062                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2063                                                 vlan_id);
2064         }
2065
2066         return (unsigned long)length;
2067 }
2068
2069 /* Process an outbound completion from an rx ring. */
2070 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2071                                    struct ob_mac_iocb_rsp *mac_rsp)
2072 {
2073         struct tx_ring *tx_ring;
2074         struct tx_ring_desc *tx_ring_desc;
2075
2076         QL_DUMP_OB_MAC_RSP(mac_rsp);
2077         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2078         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2079         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2080         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2081         tx_ring->tx_packets++;
2082         dev_kfree_skb(tx_ring_desc->skb);
2083         tx_ring_desc->skb = NULL;
2084
2085         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2086                                         OB_MAC_IOCB_RSP_S |
2087                                         OB_MAC_IOCB_RSP_L |
2088                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2089                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2090                         netif_warn(qdev, tx_done, qdev->ndev,
2091                                    "Total descriptor length did not match transfer length.\n");
2092                 }
2093                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2094                         netif_warn(qdev, tx_done, qdev->ndev,
2095                                    "Frame too short to be valid, not sent.\n");
2096                 }
2097                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2098                         netif_warn(qdev, tx_done, qdev->ndev,
2099                                    "Frame too long, but sent anyway.\n");
2100                 }
2101                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2102                         netif_warn(qdev, tx_done, qdev->ndev,
2103                                    "PCI backplane error. Frame not sent.\n");
2104                 }
2105         }
2106         atomic_inc(&tx_ring->tx_count);
2107 }
2108
2109 /* Fire up a handler to reset the MPI processor. */
2110 void ql_queue_fw_error(struct ql_adapter *qdev)
2111 {
2112         ql_link_off(qdev);
2113         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2114 }
2115
2116 void ql_queue_asic_error(struct ql_adapter *qdev)
2117 {
2118         ql_link_off(qdev);
2119         ql_disable_interrupts(qdev);
2120         /* Clear adapter up bit to signal the recovery
2121          * process that it shouldn't kill the reset worker
2122          * thread
2123          */
2124         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2125         /* Set asic recovery bit to indicate reset process that we are
2126          * in fatal error recovery process rather than normal close
2127          */
2128         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2129         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2130 }
2131
2132 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2133                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2134 {
2135         switch (ib_ae_rsp->event) {
2136         case MGMT_ERR_EVENT:
2137                 netif_err(qdev, rx_err, qdev->ndev,
2138                           "Management Processor Fatal Error.\n");
2139                 ql_queue_fw_error(qdev);
2140                 return;
2141
2142         case CAM_LOOKUP_ERR_EVENT:
2143                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2144                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2145                 ql_queue_asic_error(qdev);
2146                 return;
2147
2148         case SOFT_ECC_ERROR_EVENT:
2149                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2150                 ql_queue_asic_error(qdev);
2151                 break;
2152
2153         case PCI_ERR_ANON_BUF_RD:
2154                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2155                                         "anonymous buffers from rx_ring %d.\n",
2156                                         ib_ae_rsp->q_id);
2157                 ql_queue_asic_error(qdev);
2158                 break;
2159
2160         default:
2161                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2162                           ib_ae_rsp->event);
2163                 ql_queue_asic_error(qdev);
2164                 break;
2165         }
2166 }
2167
2168 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2169 {
2170         struct ql_adapter *qdev = rx_ring->qdev;
2171         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2172         struct ob_mac_iocb_rsp *net_rsp = NULL;
2173         int count = 0;
2174
2175         struct tx_ring *tx_ring;
2176         /* While there are entries in the completion queue. */
2177         while (prod != rx_ring->cnsmr_idx) {
2178
2179                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2180                              "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2181                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2182
2183                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2184                 rmb();
2185                 switch (net_rsp->opcode) {
2186
2187                 case OPCODE_OB_MAC_TSO_IOCB:
2188                 case OPCODE_OB_MAC_IOCB:
2189                         ql_process_mac_tx_intr(qdev, net_rsp);
2190                         break;
2191                 default:
2192                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2193                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2194                                      net_rsp->opcode);
2195                 }
2196                 count++;
2197                 ql_update_cq(rx_ring);
2198                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2199         }
2200         if (!net_rsp)
2201                 return 0;
2202         ql_write_cq_idx(rx_ring);
2203         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2204         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2205                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2206                         /*
2207                          * The queue got stopped because the tx_ring was full.
2208                          * Wake it up, because it's now at least 25% empty.
2209                          */
2210                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2211         }
2212
2213         return count;
2214 }
2215
2216 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2217 {
2218         struct ql_adapter *qdev = rx_ring->qdev;
2219         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2220         struct ql_net_rsp_iocb *net_rsp;
2221         int count = 0;
2222
2223         /* While there are entries in the completion queue. */
2224         while (prod != rx_ring->cnsmr_idx) {
2225
2226                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227                              "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2228                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2229
2230                 net_rsp = rx_ring->curr_entry;
2231                 rmb();
2232                 switch (net_rsp->opcode) {
2233                 case OPCODE_IB_MAC_IOCB:
2234                         ql_process_mac_rx_intr(qdev, rx_ring,
2235                                                (struct ib_mac_iocb_rsp *)
2236                                                net_rsp);
2237                         break;
2238
2239                 case OPCODE_IB_AE_IOCB:
2240                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2241                                                 net_rsp);
2242                         break;
2243                 default:
2244                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2245                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2246                                      net_rsp->opcode);
2247                         break;
2248                 }
2249                 count++;
2250                 ql_update_cq(rx_ring);
2251                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2252                 if (count == budget)
2253                         break;
2254         }
2255         ql_update_buffer_queues(qdev, rx_ring);
2256         ql_write_cq_idx(rx_ring);
2257         return count;
2258 }
2259
2260 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2261 {
2262         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2263         struct ql_adapter *qdev = rx_ring->qdev;
2264         struct rx_ring *trx_ring;
2265         int i, work_done = 0;
2266         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2267
2268         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2269                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2270
2271         /* Service the TX rings first.  They start
2272          * right after the RSS rings. */
2273         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2274                 trx_ring = &qdev->rx_ring[i];
2275                 /* If this TX completion ring belongs to this vector and
2276                  * it's not empty then service it.
2277                  */
2278                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2279                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2280                                         trx_ring->cnsmr_idx)) {
2281                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2282                                      "%s: Servicing TX completion ring %d.\n",
2283                                      __func__, trx_ring->cq_id);
2284                         ql_clean_outbound_rx_ring(trx_ring);
2285                 }
2286         }
2287
2288         /*
2289          * Now service the RSS ring if it's active.
2290          */
2291         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2292                                         rx_ring->cnsmr_idx) {
2293                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2294                              "%s: Servicing RX completion ring %d.\n",
2295                              __func__, rx_ring->cq_id);
2296                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2297         }
2298
2299         if (work_done < budget) {
2300                 napi_complete(napi);
2301                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2302         }
2303         return work_done;
2304 }
2305
2306 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2307 {
2308         struct ql_adapter *qdev = netdev_priv(ndev);
2309
2310         if (features & NETIF_F_HW_VLAN_RX) {
2311                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2312                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2313         } else {
2314                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2315         }
2316 }
2317
2318 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2319         netdev_features_t features)
2320 {
2321         /*
2322          * Since there is no support for separate rx/tx vlan accel
2323          * enable/disable make sure tx flag is always in same state as rx.
2324          */
2325         if (features & NETIF_F_HW_VLAN_RX)
2326                 features |= NETIF_F_HW_VLAN_TX;
2327         else
2328                 features &= ~NETIF_F_HW_VLAN_TX;
2329
2330         return features;
2331 }
2332
2333 static int qlge_set_features(struct net_device *ndev,
2334         netdev_features_t features)
2335 {
2336         netdev_features_t changed = ndev->features ^ features;
2337
2338         if (changed & NETIF_F_HW_VLAN_RX)
2339                 qlge_vlan_mode(ndev, features);
2340
2341         return 0;
2342 }
2343
2344 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2345 {
2346         u32 enable_bit = MAC_ADDR_E;
2347         int err;
2348
2349         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2350                                   MAC_ADDR_TYPE_VLAN, vid);
2351         if (err)
2352                 netif_err(qdev, ifup, qdev->ndev,
2353                           "Failed to init vlan address.\n");
2354         return err;
2355 }
2356
2357 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2358 {
2359         struct ql_adapter *qdev = netdev_priv(ndev);
2360         int status;
2361         int err;
2362
2363         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2364         if (status)
2365                 return status;
2366
2367         err = __qlge_vlan_rx_add_vid(qdev, vid);
2368         set_bit(vid, qdev->active_vlans);
2369
2370         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2371