/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |    */
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;  /* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
                "Option to enable MPI firmware dump. "
                "Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
                "Option to allow force of firmware core dump. "
                "Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
        u32 sem_bits = 0;

        switch (sem_mask) {
        case SEM_XGMAC0_MASK:
                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
                break;
        case SEM_XGMAC1_MASK:
                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
                break;
        case SEM_ICB_MASK:
                sem_bits = SEM_SET << SEM_ICB_SHIFT;
                break;
        case SEM_MAC_ADDR_MASK:
                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
                break;
        case SEM_FLASH_MASK:
                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
                break;
        case SEM_PROBE_MASK:
                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
                break;
        case SEM_RT_IDX_MASK:
                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
                break;
        case SEM_PROC_REG_MASK:
                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
                break;
        default:
                netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
                return -EINVAL;
        }

        ql_write32(qdev, SEM, sem_bits | sem_mask);
        return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
        unsigned int wait_count = 30;
        do {
                if (!ql_sem_trylock(qdev, sem_mask))
                        return 0;
                udelay(100);
        } while (--wait_count);
        return -ETIMEDOUT;
}
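
/* A note on the acquire pattern above: ql_sem_trylock() writes the
 * SEM_SET bits for the requested resource together with its mask (the
 * mask half of the register gates which field the write affects) and
 * then reads SEM back; ownership is confirmed only if the set bits
 * stick.  ql_sem_spinlock() retries up to 30 times with a 100 usec
 * delay, roughly 3 ms, before giving up with -ETIMEDOUT.
 */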

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
        ql_write32(qdev, SEM, sem_mask);
        ql_read32(qdev, SEM);   /* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
        u32 temp;
        int count = UDELAY_COUNT;

        while (count) {
                temp = ql_read32(qdev, reg);

                /* check for errors */
                if (temp & err_bit) {
                        netif_alert(qdev, probe, qdev->ndev,
                                    "register 0x%.08x access error, value = 0x%.08x!.\n",
                                    reg, temp);
                        return -EIO;
                } else if (temp & bit)
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        netif_alert(qdev, probe, qdev->ndev,
                    "Timed out waiting for reg %x to come ready.\n", reg);
        return -ETIMEDOUT;
}
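
/* Note: the poll above is bounded by UDELAY_COUNT iterations of
 * UDELAY_DELAY microseconds, so callers see 0 on success, -EIO if the
 * error bit asserts, or -ETIMEDOUT if the budget runs out.
 */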

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
        int count = UDELAY_COUNT;
        u32 temp;

        while (count) {
                temp = ql_read32(qdev, CFG);
                if (temp & CFG_LE)
                        return -EIO;
                if (!(temp & bit))
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
                 u16 q_id)
{
        u64 map;
        int status = 0;
        int direction;
        u32 mask;
        u32 value;

        direction =
            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
            PCI_DMA_FROMDEVICE;

        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
                netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
                return -ENOMEM;
        }

        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
        if (status)
                goto unmap;

        status = ql_wait_cfg(qdev, bit);
        if (status) {
                netif_err(qdev, ifup, qdev->ndev,
                          "Timed out waiting for CFG to come ready.\n");
                goto exit;
        }

        ql_write32(qdev, ICB_L, (u32) map);
        ql_write32(qdev, ICB_H, (u32) (map >> 32));

        mask = CFG_Q_MASK | (bit << 16);
        value = bit | (q_id << CFG_Q_SHIFT);
        ql_write32(qdev, CFG, (mask | value));

        /*
         * Wait for the bit to clear after signaling hw.
         */
        status = ql_wait_cfg(qdev, bit);
exit:
        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
unmap:
        /* Release the DMA mapping even if we never got the semaphore,
         * otherwise it would leak on the early-exit path.
         */
        pci_unmap_single(qdev->pdev, map, size, direction);
        return status;
}
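
/* Note: the CFG write above follows the convention used elsewhere in
 * this driver (e.g. INTR_EN, STS): the upper 16 bits act as a mask
 * selecting which bits of the lower half actually change, hence
 * (CFG_Q_MASK | (bit << 16)) combined with the bit and queue id.
 */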

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                        u32 *value)
{
        u32 offset = 0;
        int status;

        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                status =
                                    ql_wait_reg_rdy(qdev,
                                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                                if (status)
                                        goto exit;
                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                                status =
                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
                                                    MAC_ADDR_MR, 0);
                                if (status)
                                        goto exit;
                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        }
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                netif_crit(qdev, ifup, qdev->ndev,
                           "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                               u16 index)
{
        u32 offset = 0;
        int status = 0;

        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
                {
                        u32 upper = (addr[0] << 8) | addr[1];
                        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
                                        (addr[4] << 8) | (addr[5]);

                        status =
                                ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                                (index << MAC_ADDR_IDX_SHIFT) |
                                type | MAC_ADDR_E);
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                                ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                                (index << MAC_ADDR_IDX_SHIFT) |
                                type | MAC_ADDR_E);

                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                                ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        break;
                }
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        u32 cam_output;
                        u32 upper = (addr[0] << 8) | addr[1];
                        u32 lower =
                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                            (addr[5]);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);       /* type */
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);       /* type */
                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                                   type);       /* type */
                        /* This field should also include the queue id
                           and possibly the function id.  Right now we hardcode
                           the route field to NIC core.
                         */
                        cam_output = (CAM_OUT_ROUTE_NIC |
                                      (qdev->
                                       func << CAM_OUT_FUNC_SHIFT) |
                                        (0 << CAM_OUT_CQ_ID_SHIFT));
                        if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
                                cam_output |= CAM_OUT_RV;
                        /* route to NIC core */
                        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
                {
                        u32 enable_bit = *((u32 *) &addr[0]);
                        /* For VLAN, the addr actually holds a bit that
                         * either enables or disables the vlan id we are
                         * addressing. It's either MAC_ADDR_E on or off.
                         * That's bit-27 we're talking about.
                         */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                                   type |       /* type */
                                   enable_bit); /* enable/disable */
                        break;
                }
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                netif_crit(qdev, ifup, qdev->ndev,
                           "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        return status;
}
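
/* Note: a CAM MAC entry is programmed as three words through the
 * MAC_ADDR_IDX/MAC_ADDR_DATA pair: the lower 32 bits of the address,
 * the upper 16 bits, then the cam_output word that routes CAM hits
 * (function number, CQ id, and optional VLAN stripping via CAM_OUT_RV).
 * Multicast entries use only the two address words, and VLAN entries
 * just toggle the MAC_ADDR_E enable bit for the indexed VLAN id.
 */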

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
        int status;
        char zero_mac_addr[ETH_ALEN];
        char *addr;

        if (set) {
                addr = &qdev->current_mac_addr[0];
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Set Mac addr %pM\n", addr);
        } else {
                memset(zero_mac_addr, 0, ETH_ALEN);
                addr = &zero_mac_addr[0];
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Clearing MAC address\n");
        }
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to init mac address.\n");
        return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
        netif_err(qdev, link, qdev->ndev, "Link is up.\n");
        netif_carrier_on(qdev->ndev);
        ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
        netif_err(qdev, link, qdev->ndev, "Link is down.\n");
        netif_carrier_off(qdev->ndev);
        ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
        int status = 0;

        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
        if (status)
                goto exit;

        ql_write32(qdev, RT_IDX,
                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
        if (status)
                goto exit;
        *value = ql_read32(qdev, RT_DATA);
exit:
        return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                              int enable)
{
        int status = -EINVAL; /* Return error if no mask match. */
        u32 value = 0;

        switch (mask) {
        case RT_IDX_CAM_HIT:
                {
                        value = RT_IDX_DST_CAM_Q |      /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
                {
                        value = RT_IDX_DST_DFLT_Q | /* dest */
                                RT_IDX_TYPE_NICQ | /* type */
                                (RT_IDX_IP_CSUM_ERR_SLOT <<
                                RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
                {
                        value = RT_IDX_DST_DFLT_Q | /* dest */
                                RT_IDX_TYPE_NICQ | /* type */
                                (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
                                RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
                {
                        value = RT_IDX_DST_RSS |        /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case 0:         /* Clear the E-bit on an entry. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (index << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        default:
                netif_err(qdev, ifup, qdev->ndev,
                          "Mask type %d not yet supported.\n", mask);
                status = -EPERM;
                goto exit;
        }

        if (value) {
                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
                if (status)
                        goto exit;
                value |= (enable ? RT_IDX_E : 0);
                ql_write32(qdev, RT_IDX, value);
                ql_write32(qdev, RT_DATA, enable ? mask : 0);
        }
exit:
        return status;
}
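
/* Note: each of the 16 routing slots is programmed through the
 * RT_IDX/RT_DATA register pair: RT_IDX selects the destination queue,
 * type and slot (plus RT_IDX_E to enable the entry), while RT_DATA
 * holds the frame-match mask.  Calling this with enable == 0 clears
 * both, which is how a slot is disabled.
 */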

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags = 0;
        struct intr_context *ctx = qdev->intr_context + intr;

        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
                /* Always enable if we're running multiple MSI-X vectors
                 * and it's not the default (zeroth) interrupt.
                 */
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
                return var;
        }

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (atomic_dec_and_test(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
        }
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}
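
/* Note: for the non-MSI-X (or vector 0) case the enable is reference
 * counted: irq_cnt is bumped by the disable path below and dropped
 * here, and the hardware enable is only written once the count reaches
 * zero.  The ql_read32(qdev, STS) that follows the write doubles as a
 * flush of the posted register write, as in ql_sem_unlock() above.
 */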

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        struct intr_context *ctx;

        /* HW disables for us if we're running multiple MSI-X vectors
         * and it's not the default (zeroth) interrupt.
         */
        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
                return 0;

        ctx = qdev->intr_context + intr;
        spin_lock(&qdev->hw_lock);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
        spin_unlock(&qdev->hw_lock);
        return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
        int i;
        for (i = 0; i < qdev->intr_count; i++) {
                /* The enable call does an atomic_dec_and_test
                 * and enables only if the result is zero.
                 * So we precharge it here.
                 */
                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
                        i == 0))
                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
                ql_enable_completion_interrupt(qdev, i);
        }
}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
        int status, i;
        u16 csum = 0;
        __le16 *flash = (__le16 *)&qdev->flash;

        status = strncmp((char *)&qdev->flash, str, 4);
        if (status) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
                return status;
        }

        for (i = 0; i < size; i++)
                csum += le16_to_cpu(*flash++);

        if (csum)
                netif_err(qdev, ifup, qdev->ndev,
                          "Invalid flash checksum, csum = 0x%.04x.\n", csum);

        return csum;
}
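
/* Note: ql_validate_flash() treats the image as an array of __le16
 * words that must sum to zero (mod 2^16), which is why callers pass
 * the size in 16-bit units, e.g.
 * sizeof(struct flash_params_8000) / sizeof(u16).
 */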

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* This data is stored on flash as an array of
         * __le32.  Since ql_read32() returns cpu endian
         * we need to swap it back.
         */
        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
        return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
        u32 i, size;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset;
        u8 mac_addr[6];

        /* Get flash offset for function and adjust
         * for dword access.
         */
        if (!qdev->port)
                offset = FUNC0_FLASH_OFFSET / sizeof(u32);
        else
                offset = FUNC1_FLASH_OFFSET / sizeof(u32);

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        size = sizeof(struct flash_params_8000) / sizeof(u32);
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        netif_err(qdev, ifup, qdev->ndev,
                                  "Error reading flash.\n");
                        goto exit;
                }
        }

        status = ql_validate_flash(qdev,
                        sizeof(struct flash_params_8000) / sizeof(u16),
                        "8000");
        if (status) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }

        /* Extract either manufacturer or BOFM modified
         * MAC address.
         */
        if (qdev->flash.flash_params_8000.data_type1 == 2)
                memcpy(mac_addr,
                        qdev->flash.flash_params_8000.mac_addr1,
                        qdev->ndev->addr_len);
        else
                memcpy(mac_addr,
                        qdev->flash.flash_params_8000.mac_addr,
                        qdev->ndev->addr_len);

        if (!is_valid_ether_addr(mac_addr)) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
                status = -EINVAL;
                goto exit;
        }

        memcpy(qdev->ndev->dev_addr,
                mac_addr,
                qdev->ndev->addr_len);

exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
        int i;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset = 0;
        u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

        /* Second function's parameters follow the first
         * function's.
         */
        if (qdev->port)
                offset = size;

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        netif_err(qdev, ifup, qdev->ndev,
                                  "Error reading flash.\n");
                        goto exit;
                }
        }

        status = ql_validate_flash(qdev,
                        sizeof(struct flash_params_8012) / sizeof(u16),
                        "8012");
        if (status) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }

        if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
                status = -EINVAL;
                goto exit;
        }

        memcpy(qdev->ndev->dev_addr,
                qdev->flash.flash_params_8012.mac_addr,
                qdev->ndev->addr_len);

exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
        int status;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                return status;
        /* write the data to the data reg */
        ql_write32(qdev, XGMAC_DATA, data);
        /* trigger the write */
        ql_write32(qdev, XGMAC_ADDR, reg);
        return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* get the data */
        *data = ql_read32(qdev, XGMAC_DATA);
exit:
        return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
        int status = 0;
        u32 hi = 0;
        u32 lo = 0;

        status = ql_read_xgmac_reg(qdev, reg, &lo);
        if (status)
                goto exit;

        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
        if (status)
                goto exit;

        *data = (u64) lo | ((u64) hi << 32);

exit:
        return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
        int status;
        /*
         * Get MPI firmware version for driver banner
         * and ethtool info.
         */
        status = ql_mb_about_fw(qdev);
        if (status)
                goto exit;
        status = ql_mb_get_fw_state(qdev);
        if (status)
                goto exit;
        /* Wake up a worker to get/set the TX/RX frame sizes. */
        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
        return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
        int status = 0;
        u32 data;

        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
                /* Another function has the semaphore, so
                 * wait for the port init bit to come ready.
                 */
                netif_info(qdev, link, qdev->ndev,
                           "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
                if (status) {
                        netif_crit(qdev, link, qdev->ndev,
                                   "Port initialize timed out.\n");
                }
                return status;
        }

        netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
        /* Set the core reset. */
        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
        if (status)
                goto end;
        data |= GLOBAL_CFG_RESET;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Clear the core reset and turn on jumbo for receiver. */
        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
        data |= GLOBAL_CFG_TX_STAT_EN;
        data |= GLOBAL_CFG_RX_STAT_EN;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Enable the transmitter and clear its reset. */
        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
        if (status)
                goto end;
        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
        data |= TX_CFG_EN;      /* Enable the transmitter. */
        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
        if (status)
                goto end;

        /* Enable the receiver and clear its reset. */
        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
        if (status)
                goto end;
        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
        data |= RX_CFG_EN;      /* Enable the receiver. */
        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
        if (status)
                goto end;

        /* Turn on jumbo. */
        status =
            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
        if (status)
                goto end;
        status =
            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
        if (status)
                goto end;

        /* Signal to the world that the port is enabled. */
        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}
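
/* Note: the 0x2580 programmed into MAC_TX_PARAMS/MAC_RX_PARAMS above
 * is 9600 decimal, presumably the maximum (jumbo) frame length in
 * bytes; for MAC_TX_PARAMS it sits in the upper half of the register
 * next to the MAC_TX_PARAMS_JUMBO flag.
 */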

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
        return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
        rx_ring->lbq_curr_idx++;
        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                rx_ring->lbq_curr_idx = 0;
        rx_ring->lbq_free_cnt++;
        return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
                struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

        pci_dma_sync_single_for_cpu(qdev->pdev,
                                    dma_unmap_addr(lbq_desc, mapaddr),
                                    rx_ring->lbq_buf_size,
                                    PCI_DMA_FROMDEVICE);

        /* If it's the last chunk of our master page then
         * we unmap it.
         */
        if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
                                        == ql_lbq_block_size(qdev))
                pci_unmap_page(qdev->pdev,
                                lbq_desc->p.pg_chunk.map,
                                ql_lbq_block_size(qdev),
                                PCI_DMA_FROMDEVICE);
        return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
        rx_ring->sbq_curr_idx++;
        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
                rx_ring->sbq_curr_idx = 0;
        rx_ring->sbq_free_cnt++;
        return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
        rx_ring->cnsmr_idx++;
        rx_ring->curr_entry++;
        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
                rx_ring->cnsmr_idx = 0;
                rx_ring->curr_entry = rx_ring->cq_base;
        }
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                                                struct bq_desc *lbq_desc)
{
        if (!rx_ring->pg_chunk.page) {
                u64 map;
                rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
                                                GFP_ATOMIC,
                                                qdev->lbq_buf_order);
                if (unlikely(!rx_ring->pg_chunk.page)) {
                        netif_err(qdev, drv, qdev->ndev,
                                  "page allocation failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.offset = 0;
                map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
                                        0, ql_lbq_block_size(qdev),
                                        PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                        __free_pages(rx_ring->pg_chunk.page,
                                        qdev->lbq_buf_order);
                        netif_err(qdev, drv, qdev->ndev,
                                  "PCI mapping failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.map = map;
                rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
        }

        /* Copy the current master pg_chunk info
         * to the current descriptor.
         */
        lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

        /* Adjust the master page chunk for next
         * buffer get.
         */
        rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
                rx_ring->pg_chunk.page = NULL;
                lbq_desc->p.pg_chunk.last_flag = 1;
        } else {
                rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
                get_page(rx_ring->pg_chunk.page);
                lbq_desc->p.pg_chunk.last_flag = 0;
        }
        return 0;
}
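
/* Note: ql_get_next_chunk() carves lbq_buf_size sized chunks out of a
 * single high-order "master" page (PAGE_SIZE << lbq_buf_order).  Every
 * chunk except the last takes an extra reference via get_page(), and
 * only the descriptor holding the final chunk (last_flag set) causes
 * ql_get_curr_lchunk() to unmap the page, so the DMA mapping is torn
 * down exactly once and the page is freed when its last user drops it.
 */
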
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        u32 clean_idx = rx_ring->lbq_clean_idx;
        u32 start_idx = clean_idx;
        struct bq_desc *lbq_desc;
        u64 map;
        int i;

        while (rx_ring->lbq_free_cnt > 32) {
                for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                                     "lbq: try cleaning clean_idx = %d.\n",
                                     clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
                                rx_ring->lbq_clean_idx = clean_idx;
                                netif_err(qdev, ifup, qdev->ndev,
                                                "Could not get a page chunk, i=%d, clean_idx =%d .\n",
                                                i, clean_idx);
                                return;
                        }

                        map = lbq_desc->p.pg_chunk.map +
                                lbq_desc->p.pg_chunk.offset;
                        dma_unmap_addr_set(lbq_desc, mapaddr, map);
                        dma_unmap_len_set(lbq_desc, maplen,
                                        rx_ring->lbq_buf_size);
                        *lbq_desc->addr = cpu_to_le64(map);

                        pci_dma_sync_single_for_device(qdev->pdev, map,
                                                rx_ring->lbq_buf_size,
                                                PCI_DMA_FROMDEVICE);
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
                                clean_idx = 0;
                }

                rx_ring->lbq_clean_idx = clean_idx;
                rx_ring->lbq_prod_idx += 16;
                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
                        rx_ring->lbq_prod_idx = 0;
                rx_ring->lbq_free_cnt -= 16;
        }

        if (start_idx != clean_idx) {
                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                             "lbq: updating prod idx = %d.\n",
                             rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
        }
}
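
/* Note: the large buffer queue is refilled in batches of 16 entries
 * while more than 32 descriptors are free; the producer index doorbell
 * (lbq_prod_idx_db_reg) is rung once, after the loop, and only if
 * something was actually refilled.
 */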

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        u32 clean_idx = rx_ring->sbq_clean_idx;
        u32 start_idx = clean_idx;
        struct bq_desc *sbq_desc;
        u64 map;
        int i;

        while (rx_ring->sbq_free_cnt > 16) {
                for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
                        sbq_desc = &rx_ring->sbq[clean_idx];
                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                                     "sbq: try cleaning clean_idx = %d.\n",
                                     clean_idx);
                        if (sbq_desc->p.skb == NULL) {
                                netif_printk(qdev, rx_status, KERN_DEBUG,
                                             qdev->ndev,
                                             "sbq: getting new skb for index %d.\n",
                                             sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
                                                     SMALL_BUFFER_SIZE);
                                if (sbq_desc->p.skb == NULL) {
                                        netif_err(qdev, probe, qdev->ndev,
                                                  "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
                                                     rx_ring->sbq_buf_size,
                                                     PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        netif_err(qdev, ifup, qdev->ndev,
                                                  "PCI mapping failed.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        dev_kfree_skb_any(sbq_desc->p.skb);
                                        sbq_desc->p.skb = NULL;
                                        return;
                                }
                                dma_unmap_addr_set(sbq_desc, mapaddr, map);
                                dma_unmap_len_set(sbq_desc, maplen,
                                                  rx_ring->sbq_buf_size);
                                *sbq_desc->addr = cpu_to_le64(map);
                        }

                        clean_idx++;
                        if (clean_idx == rx_ring->sbq_len)
                                clean_idx = 0;
                }
                rx_ring->sbq_clean_idx = clean_idx;
                rx_ring->sbq_prod_idx += 16;
                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
                        rx_ring->sbq_prod_idx = 0;
                rx_ring->sbq_free_cnt -= 16;
        }

        if (start_idx != clean_idx) {
                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                             "sbq: updating prod idx = %d.\n",
                             rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);
        }
}
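
/* Note: small buffers are ordinary skbs of SMALL_BUFFER_SIZE with
 * QLGE_SB_PAD reserved at the head; as with the large queue, refill
 * runs in batches of 16 and the doorbell is written once at the end.
 */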

static void ql_update_buffer_queues(struct ql_adapter *qdev,
                                    struct rx_ring *rx_ring)
{
        ql_update_sbq(qdev, rx_ring);
        ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
                          struct tx_ring_desc *tx_ring_desc, int mapped)
{
        int i;
        for (i = 0; i < mapped; i++) {
                if (i == 0 || (i == 7 && mapped > 7)) {
                        /*
                         * Unmap the skb->data area, or the
                         * external sglist (AKA the Outbound
                         * Address List (OAL)).
                         * If it's the zeroth element, then it's
                         * the skb->data area.  If it's the 7th
                         * element and there are more than 6 frags,
                         * then it's an OAL.
                         */
                        if (i == 7) {
                                netif_printk(qdev, tx_done, KERN_DEBUG,
                                             qdev->ndev,
                                             "unmapping OAL area.\n");
                        }
                        pci_unmap_single(qdev->pdev,
                                         dma_unmap_addr(&tx_ring_desc->map[i],
                                                        mapaddr),
                                         dma_unmap_len(&tx_ring_desc->map[i],
                                                       maplen),
                                         PCI_DMA_TODEVICE);
                } else {
                        netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
                                     "unmapping frag %d.\n", i);
                        pci_unmap_page(qdev->pdev,
                                       dma_unmap_addr(&tx_ring_desc->map[i],
                                                      mapaddr),
                                       dma_unmap_len(&tx_ring_desc->map[i],
                                                     maplen), PCI_DMA_TODEVICE);
                }
        }
}
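
/* Note: the special cases above mirror the TX mapping layout set up by
 * ql_map_send() below: map[0] is always the linear skb->data area
 * mapped with pci_map_single(), entries 1..6 are page fragments, and
 * when a packet has more than 7 fragments map[7] is the single mapping
 * of the external OAL that holds the remaining tx_buf_desc entries.
 */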
1306
1307 /* Map the buffers for this transmit.  This will return
1308  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309  */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311                        struct ob_mac_iocb_req *mac_iocb_ptr,
1312                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314         int len = skb_headlen(skb);
1315         dma_addr_t map;
1316         int frag_idx, err, map_idx = 0;
1317         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318         int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320         if (frag_cnt) {
1321                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322                              "frag_cnt = %d.\n", frag_cnt);
1323         }
1324         /*
1325          * Map the skb buffer first.
1326          */
1327         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329         err = pci_dma_mapping_error(qdev->pdev, map);
1330         if (err) {
1331                 netif_err(qdev, tx_queued, qdev->ndev,
1332                           "PCI mapping failed with error: %d\n", err);
1333
1334                 return NETDEV_TX_BUSY;
1335         }
1336
1337         tbd->len = cpu_to_le32(len);
1338         tbd->addr = cpu_to_le64(map);
1339         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341         map_idx++;
1342
1343         /*
1344          * This loop fills the remainder of the 8 address descriptors
1345          * in the IOCB.  If there are more than 7 fragments, then the
1346          * eighth address desc will point to an external list (OAL).
1347          * When this happens, the remainder of the frags will be stored
1348          * in this list.
1349          */
1350         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352                 tbd++;
1353                 if (frag_idx == 6 && frag_cnt > 7) {
1354                         /* Let's tack on an sglist.
1355                          * Our control block will now
1356                          * look like this:
1357                          * iocb->seg[0] = skb->data
1358                          * iocb->seg[1] = frag[0]
1359                          * iocb->seg[2] = frag[1]
1360                          * iocb->seg[3] = frag[2]
1361                          * iocb->seg[4] = frag[3]
1362                          * iocb->seg[5] = frag[4]
1363                          * iocb->seg[6] = frag[5]
1364                          * iocb->seg[7] = ptr to OAL (external sglist)
1365                          * oal->seg[0] = frag[6]
1366                          * oal->seg[1] = frag[7]
1367                          * oal->seg[2] = frag[8]
1368                          * oal->seg[3] = frag[9]
1369                          * oal->seg[4] = frag[10]
1370                          *      etc...
1371                          */
1372                         /* Tack on the OAL in the eighth segment of IOCB. */
1373                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374                                              sizeof(struct oal),
1375                                              PCI_DMA_TODEVICE);
1376                         err = pci_dma_mapping_error(qdev->pdev, map);
1377                         if (err) {
1378                                 netif_err(qdev, tx_queued, qdev->ndev,
1379                                           "PCI mapping outbound address list with error: %d\n",
1380                                           err);
1381                                 goto map_error;
1382                         }
1383
1384                         tbd->addr = cpu_to_le64(map);
1385                         /*
1386                          * The length is the number of fragments
1387                          * that remain to be mapped times the size
1388                          * of a single OAL entry.
1389                          */
1390                         tbd->len =
1391                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1392                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1393                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394                                            map);
1395                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396                                           sizeof(struct oal));
1397                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398                         map_idx++;
1399                 }
1400
1401                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402                                        DMA_TO_DEVICE);
1403
1404                 err = dma_mapping_error(&qdev->pdev->dev, map);
1405                 if (err) {
1406                         netif_err(qdev, tx_queued, qdev->ndev,
1407                                   "PCI mapping frags failed with error: %d.\n",
1408                                   err);
1409                         goto map_error;
1410                 }
1411
1412                 tbd->addr = cpu_to_le64(map);
1413                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416                                   skb_frag_size(frag));
1417
1418         }
1419         /* Save the number of segments we've mapped. */
1420         tx_ring_desc->map_cnt = map_idx;
1421         /* Terminate the last segment. */
1422         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423         return NETDEV_TX_OK;
1424
1425 map_error:
1426         /*
1427          * map_idx holds the number of segments (the skb->data area
1428          * plus any frags and the OAL) that were mapped successfully
1429          * before the failure, so ql_unmap_send() can undo exactly
1430          * those mappings.
1431          */
1432         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433         return NETDEV_TX_BUSY;
1434 }
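
/*
 * A minimal sketch of the OAL length encoding used in ql_map_send()
 * above: once the eighth IOCB slot is reached, its length field is the
 * number of frags still to be mapped times the size of one descriptor,
 * with the continuation bit OR'd in.  The constants below are
 * illustrative stand-ins, not the driver's real values.
 *
 *	enum { DESC_SIZE = 16, DESC_CONT = 1u << 30 };	// assumed values
 *
 *	static unsigned int oal_len_field(int frag_cnt, int frag_idx)
 *	{
 *		// e.g. frag_cnt = 10, frag_idx = 6 -> 4 frags remain
 *		return (DESC_SIZE * (frag_cnt - frag_idx)) | DESC_CONT;
 *	}
 */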
1435
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1438 {
1439         struct nic_stats *stats = &qdev->nic_stats;
1440
1441         stats->rx_err_count++;
1442
1443         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445                 stats->rx_code_err++;
1446                 break;
1447         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448                 stats->rx_oversize_err++;
1449                 break;
1450         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451                 stats->rx_undersize_err++;
1452                 break;
1453         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454                 stats->rx_preamble_err++;
1455                 break;
1456         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457                 stats->rx_frame_len_err++;
1458                 break;
1459         case IB_MAC_IOCB_RSP_ERR_CRC:
1460                 stats->rx_crc_err++;
1461         default:
1462                 break;
1463         }
1464 }
1465
1466 /* Process an inbound completion from an rx ring. */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468                                         struct rx_ring *rx_ring,
1469                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1470                                         u32 length,
1471                                         u16 vlan_id)
1472 {
1473         struct sk_buff *skb;
1474         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475         struct napi_struct *napi = &rx_ring->napi;
1476
1477         napi->dev = qdev->ndev;
1478
1479         skb = napi_get_frags(napi);
1480         if (!skb) {
1481                 netif_err(qdev, drv, qdev->ndev,
1482                           "Couldn't get an skb, exiting.\n");
1483                 rx_ring->rx_dropped++;
1484                 put_page(lbq_desc->p.pg_chunk.page);
1485                 return;
1486         }
1487         prefetch(lbq_desc->p.pg_chunk.va);
1488         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1489                              lbq_desc->p.pg_chunk.page,
1490                              lbq_desc->p.pg_chunk.offset,
1491                              length);
1492
1493         skb->len += length;
1494         skb->data_len += length;
1495         skb->truesize += length;
1496         skb_shinfo(skb)->nr_frags++;
1497
1498         rx_ring->rx_packets++;
1499         rx_ring->rx_bytes += length;
1500         skb->ip_summed = CHECKSUM_UNNECESSARY;
1501         skb_record_rx_queue(skb, rx_ring->cq_id);
1502         if (vlan_id != 0xffff)
1503                 __vlan_hwaccel_put_tag(skb, vlan_id);
1504         napi_gro_frags(napi);
1505 }
1506
1507 /* Process an inbound completion from an rx ring. */
1508 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1509                                         struct rx_ring *rx_ring,
1510                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1511                                         u32 length,
1512                                         u16 vlan_id)
1513 {
1514         struct net_device *ndev = qdev->ndev;
1515         struct sk_buff *skb = NULL;
1516         void *addr;
1517         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1518         struct napi_struct *napi = &rx_ring->napi;
1519
1520         skb = netdev_alloc_skb(ndev, length);
1521         if (!skb) {
1522                 netif_err(qdev, drv, qdev->ndev,
1523                           "Couldn't get an skb, need to unwind!\n");
1524                 rx_ring->rx_dropped++;
1525                 put_page(lbq_desc->p.pg_chunk.page);
1526                 return;
1527         }
1528
1529         addr = lbq_desc->p.pg_chunk.va;
1530         prefetch(addr);
1531
1532         /* The max framesize filter on this chip is set higher than
1533          * MTU since FCoE uses 2k frames.
1534          */
1535         if (skb->len > ndev->mtu + ETH_HLEN) {
1536                 netif_err(qdev, drv, qdev->ndev,
1537                           "Frame too long, dropping.\n");
1538                 rx_ring->rx_dropped++;
1539                 goto err_out;
1540         }
1541         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1542         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1543                      "%d bytes of headers and data in large. Copy the header and chain the page.\n",
1544                      length);
1545         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1546                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1547                                 length-ETH_HLEN);
1548         skb->len += length-ETH_HLEN;
1549         skb->data_len += length-ETH_HLEN;
1550         skb->truesize += length-ETH_HLEN;
1551
1552         rx_ring->rx_packets++;
1553         rx_ring->rx_bytes += skb->len;
1554         skb->protocol = eth_type_trans(skb, ndev);
1555         skb_checksum_none_assert(skb);
1556
1557         if ((ndev->features & NETIF_F_RXCSUM) &&
1558                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1559                 /* TCP frame. */
1560                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1561                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562                                      "TCP checksum done!\n");
1563                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1564                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1565                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1566                         /* Unfragmented ipv4 UDP frame. */
1567                         struct iphdr *iph =
1568                                 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1569                         if (!(iph->frag_off &
1570                                 htons(IP_MF|IP_OFFSET))) {
1571                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1572                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1573                                              qdev->ndev,
1574                                              "UDP checksum done!\n");
1575                         }
1576                 }
1577         }
1578
1579         skb_record_rx_queue(skb, rx_ring->cq_id);
1580         if (vlan_id != 0xffff)
1581                 __vlan_hwaccel_put_tag(skb, vlan_id);
1582         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1583                 napi_gro_receive(napi, skb);
1584         else
1585                 netif_receive_skb(skb);
1586         return;
1587 err_out:
1588         dev_kfree_skb_any(skb);
1589         put_page(lbq_desc->p.pg_chunk.page);
1590 }
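
/*
 * The UDP test above only trusts the hardware checksum for
 * unfragmented datagrams: both the more-fragments flag and the
 * fragment offset in iph->frag_off must be zero.  A standalone sketch
 * of that predicate over a host-order frag_off value (IP_MF is 0x2000,
 * IP_OFFSET is 0x1fff):
 *
 *	static int is_unfragmented(unsigned short frag_off_host)
 *	{
 *		return (frag_off_host & (0x2000 | 0x1fff)) == 0;
 *	}
 */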
1591
1592 /* Process an inbound completion from an rx ring. */
1593 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1594                                         struct rx_ring *rx_ring,
1595                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1596                                         u32 length,
1597                                         u16 vlan_id)
1598 {
1599         struct net_device *ndev = qdev->ndev;
1600         struct sk_buff *skb = NULL;
1601         struct sk_buff *new_skb = NULL;
1602         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1603
1604         skb = sbq_desc->p.skb;
1605         /* Allocate new_skb and copy */
1606         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1607         if (new_skb == NULL) {
1608                 netif_err(qdev, probe, qdev->ndev,
1609                           "No skb available, drop the packet.\n");
1610                 rx_ring->rx_dropped++;
1611                 return;
1612         }
1613         skb_reserve(new_skb, NET_IP_ALIGN);
1614         memcpy(skb_put(new_skb, length), skb->data, length);
1615         skb = new_skb;
1616
1617         /* loopback self test for ethtool */
1618         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1619                 ql_check_lb_frame(qdev, skb);
1620                 dev_kfree_skb_any(skb);
1621                 return;
1622         }
1623
1624         /* The max framesize filter on this chip is set higher than
1625          * MTU since FCoE uses 2k frames.
1626          */
1627         if (skb->len > ndev->mtu + ETH_HLEN) {
1628                 dev_kfree_skb_any(skb);
1629                 rx_ring->rx_dropped++;
1630                 return;
1631         }
1632
1633         prefetch(skb->data);
1634         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1635                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1636                              "%s Multicast.\n",
1637                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1638                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1639                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1641                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1643         }
1644         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1645                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646                              "Promiscuous Packet.\n");
1647
1648         rx_ring->rx_packets++;
1649         rx_ring->rx_bytes += skb->len;
1650         skb->protocol = eth_type_trans(skb, ndev);
1651         skb_checksum_none_assert(skb);
1652
1653         /* If rx checksum is on, and there are no
1654          * csum or frame errors.
1655          */
1656         if ((ndev->features & NETIF_F_RXCSUM) &&
1657                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1658                 /* TCP frame. */
1659                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1661                                      "TCP checksum done!\n");
1662                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1663                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665                         /* Unfragmented ipv4 UDP frame. */
1666                         struct iphdr *iph = (struct iphdr *) skb->data;
1667                         if (!(iph->frag_off &
1668                                 htons(IP_MF|IP_OFFSET))) {
1669                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1670                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1671                                              qdev->ndev,
1672                                              "UDP checksum done!\n");
1673                         }
1674                 }
1675         }
1676
1677         skb_record_rx_queue(skb, rx_ring->cq_id);
1678         if (vlan_id != 0xffff)
1679                 __vlan_hwaccel_put_tag(skb, vlan_id);
1680         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1681                 napi_gro_receive(&rx_ring->napi, skb);
1682         else
1683                 netif_receive_skb(skb);
1684 }
1685
1686 static void ql_realign_skb(struct sk_buff *skb, int len)
1687 {
1688         void *temp_addr = skb->data;
1689
1690         /* Undo the skb_reserve(skb,32) we did before
1691          * giving to hardware, and realign data on
1692          * a 2-byte boundary.
1693          */
1694         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1695         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1696         skb_copy_to_linear_data(skb, temp_addr,
1697                 (unsigned int)len);
1698 }
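
/*
 * The realignment above is plain pointer arithmetic: the buffer was
 * handed to hardware with a QLGE_SB_PAD byte reserve, and pulling
 * skb->data back by QLGE_SB_PAD - NET_IP_ALIGN leaves the usual
 * 2-byte headroom so the IP header lands on a 4-byte boundary.  A
 * sketch with assumed values of 32 and 2:
 *
 *	static unsigned char *realigned(unsigned char *hw_data)
 *	{
 *		return hw_data - (32 - 2);	// step back 30 bytes
 *	}
 */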
1699
1700 /*
1701  * This function builds an skb for the given inbound
1702  * completion.  It will be rewritten for readability in the near
1703  * future, but for now it works well.
1704  */
1705 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1706                                        struct rx_ring *rx_ring,
1707                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1708 {
1709         struct bq_desc *lbq_desc;
1710         struct bq_desc *sbq_desc;
1711         struct sk_buff *skb = NULL;
1712         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1713         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1714
1715         /*
1716          * Handle the header buffer if present.
1717          */
1718         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1719             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1720                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1721                              "Header of %d bytes in small buffer.\n", hdr_len);
1722                 /*
1723                  * Headers fit nicely into a small buffer.
1724                  */
1725                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1726                 pci_unmap_single(qdev->pdev,
1727                                 dma_unmap_addr(sbq_desc, mapaddr),
1728                                 dma_unmap_len(sbq_desc, maplen),
1729                                 PCI_DMA_FROMDEVICE);
1730                 skb = sbq_desc->p.skb;
1731                 ql_realign_skb(skb, hdr_len);
1732                 skb_put(skb, hdr_len);
1733                 sbq_desc->p.skb = NULL;
1734         }
1735
1736         /*
1737          * Handle the data buffer(s).
1738          */
1739         if (unlikely(!length)) {        /* Is there data too? */
1740                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741                              "No Data buffer in this packet.\n");
1742                 return skb;
1743         }
1744
1745         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1746                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748                                      "Headers in small, data of %d bytes in small, combine them.\n",
1749                                      length);
1750                         /*
1751                          * Data is less than small buffer size so it's
1752                          * stuffed in a small buffer.
1753                          * For this case we append the data
1754                          * from the "data" small buffer to the "header" small
1755                          * buffer.
1756                          */
1757                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1758                         pci_dma_sync_single_for_cpu(qdev->pdev,
1759                                                     dma_unmap_addr
1760                                                     (sbq_desc, mapaddr),
1761                                                     dma_unmap_len
1762                                                     (sbq_desc, maplen),
1763                                                     PCI_DMA_FROMDEVICE);
1764                         memcpy(skb_put(skb, length),
1765                                sbq_desc->p.skb->data, length);
1766                         pci_dma_sync_single_for_device(qdev->pdev,
1767                                                        dma_unmap_addr
1768                                                        (sbq_desc,
1769                                                         mapaddr),
1770                                                        dma_unmap_len
1771                                                        (sbq_desc,
1772                                                         maplen),
1773                                                        PCI_DMA_FROMDEVICE);
1774                 } else {
1775                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776                                      "%d bytes in a single small buffer.\n",
1777                                      length);
1778                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1779                         skb = sbq_desc->p.skb;
1780                         ql_realign_skb(skb, length);
1781                         skb_put(skb, length);
1782                         pci_unmap_single(qdev->pdev,
1783                                          dma_unmap_addr(sbq_desc,
1784                                                         mapaddr),
1785                                          dma_unmap_len(sbq_desc,
1786                                                        maplen),
1787                                          PCI_DMA_FROMDEVICE);
1788                         sbq_desc->p.skb = NULL;
1789                 }
1790         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1791                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793                                      "Header in small, %d bytes in large. Chain large to small!\n",
1794                                      length);
1795                         /*
1796                          * The data is in a single large buffer.  We
1797                          * chain it to the header buffer's skb and let
1798                          * it rip.
1799                          */
1800                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1801                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802                                      "Chaining page at offset = %d, for %d bytes to skb.\n",
1803                                      lbq_desc->p.pg_chunk.offset, length);
1804                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1805                                                 lbq_desc->p.pg_chunk.offset,
1806                                                 length);
1807                         skb->len += length;
1808                         skb->data_len += length;
1809                         skb->truesize += length;
1810                 } else {
1811                         /*
1812                          * The headers and data are in a single large buffer. We
1813                          * copy it to a new skb and let it go. This can happen with
1814                          * jumbo mtu on a non-TCP/UDP frame.
1815                          */
1816                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817                         skb = netdev_alloc_skb(qdev->ndev, length);
1818                         if (skb == NULL) {
1819                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1820                                              "No skb available, drop the packet.\n");
1821                                 return NULL;
1822                         }
1823                         pci_unmap_page(qdev->pdev,
1824                                        dma_unmap_addr(lbq_desc,
1825                                                       mapaddr),
1826                                        dma_unmap_len(lbq_desc, maplen),
1827                                        PCI_DMA_FROMDEVICE);
1828                         skb_reserve(skb, NET_IP_ALIGN);
1829                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1831                                      length);
1832                         skb_fill_page_desc(skb, 0,
1833                                                 lbq_desc->p.pg_chunk.page,
1834                                                 lbq_desc->p.pg_chunk.offset,
1835                                                 length);
1836                         skb->len += length;
1837                         skb->data_len += length;
1838                         skb->truesize += length;
1839                         length -= length;
1840                         __pskb_pull_tail(skb,
1841                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1842                                 VLAN_ETH_HLEN : ETH_HLEN);
1843                 }
1844         } else {
1845                 /*
1846                  * The data is in a chain of large buffers
1847                  * pointed to by a small buffer.  We loop
1848                  * through and chain them to our small header
1849                  * buffer's skb.
1850                  * frags:  There are 18 max frags and our small
1851                  *         buffer will hold 32 of them. In practice
1852                  *         we'll use 3 at most for our 9000 byte jumbo
1853                  *         frames.  If the MTU goes up we could
1854                  *         eventually be in trouble.
1855                  */
1856                 int size, i = 0;
1857                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1858                 pci_unmap_single(qdev->pdev,
1859                                  dma_unmap_addr(sbq_desc, mapaddr),
1860                                  dma_unmap_len(sbq_desc, maplen),
1861                                  PCI_DMA_FROMDEVICE);
1862                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1863                         /*
1864                          * This is a non-TCP/UDP IP frame, so
1865                          * the headers aren't split into a small
1866                          * buffer.  We have to use the small buffer
1867                          * that contains our sg list as our skb to
1868                          * send upstairs. Copy the sg list here to
1869                          * a local buffer and use it to find the
1870                          * pages to chain.
1871                          */
1872                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1873                                      "%d bytes of headers & data in chain of large.\n",
1874                                      length);
1875                         skb = sbq_desc->p.skb;
1876                         sbq_desc->p.skb = NULL;
1877                         skb_reserve(skb, NET_IP_ALIGN);
1878                 }
1879                 while (length > 0) {
1880                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1881                         size = (length < rx_ring->lbq_buf_size) ? length :
1882                                 rx_ring->lbq_buf_size;
1883
1884                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885                                      "Adding page %d to skb for %d bytes.\n",
1886                                      i, size);
1887                         skb_fill_page_desc(skb, i,
1888                                                 lbq_desc->p.pg_chunk.page,
1889                                                 lbq_desc->p.pg_chunk.offset,
1890                                                 size);
1891                         skb->len += size;
1892                         skb->data_len += size;
1893                         skb->truesize += size;
1894                         length -= size;
1895                         i++;
1896                 }
1897                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1898                                 VLAN_ETH_HLEN : ETH_HLEN);
1899         }
1900         return skb;
1901 }
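
/*
 * Worked example for the chained-large-buffer path above, assuming a
 * 4096-byte large buffer chunk (the real size is rx_ring->lbq_buf_size):
 * a 9000-byte jumbo frame consumes (9000 + 4095) / 4096 = 3 page
 * chunks, which matches the "3 at most" noted in the comment.
 *
 *	static int chunks_needed(int frame_len, int chunk_size)
 *	{
 *		return (frame_len + chunk_size - 1) / chunk_size;
 *	}
 */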
1902
1903 /* Process an inbound completion from an rx ring. */
1904 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1905                                    struct rx_ring *rx_ring,
1906                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1907                                    u16 vlan_id)
1908 {
1909         struct net_device *ndev = qdev->ndev;
1910         struct sk_buff *skb = NULL;
1911
1912         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1913
1914         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1915         if (unlikely(!skb)) {
1916                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917                              "No skb available, drop packet.\n");
1918                 rx_ring->rx_dropped++;
1919                 return;
1920         }
1921
1922         /* The max framesize filter on this chip is set higher than
1923          * MTU since FCoE uses 2k frames.
1924          */
1925         if (skb->len > ndev->mtu + ETH_HLEN) {
1926                 dev_kfree_skb_any(skb);
1927                 rx_ring->rx_dropped++;
1928                 return;
1929         }
1930
1931         /* loopback self test for ethtool */
1932         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1933                 ql_check_lb_frame(qdev, skb);
1934                 dev_kfree_skb_any(skb);
1935                 return;
1936         }
1937
1938         prefetch(skb->data);
1939         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1940                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1941                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1942                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1943                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1944                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1945                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1946                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1947                 rx_ring->rx_multicast++;
1948         }
1949         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1950                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951                              "Promiscuous Packet.\n");
1952         }
1953
1954         skb->protocol = eth_type_trans(skb, ndev);
1955         skb_checksum_none_assert(skb);
1956
1957         /* If rx checksum is on, and there are no
1958          * csum or frame errors.
1959          */
1960         if ((ndev->features & NETIF_F_RXCSUM) &&
1961                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1962                 /* TCP frame. */
1963                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1964                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1965                                      "TCP checksum done!\n");
1966                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1967                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1968                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1969                         /* Unfragmented ipv4 UDP frame. */
1970                         struct iphdr *iph = (struct iphdr *) skb->data;
1971                         if (!(iph->frag_off &
1972                                 htons(IP_MF|IP_OFFSET))) {
1973                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1974                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1975                                              "UDP checksum done!\n");
1976                         }
1977                 }
1978         }
1979
1980         rx_ring->rx_packets++;
1981         rx_ring->rx_bytes += skb->len;
1982         skb_record_rx_queue(skb, rx_ring->cq_id);
1983         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1984                 __vlan_hwaccel_put_tag(skb, vlan_id);
1985         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1986                 napi_gro_receive(&rx_ring->napi, skb);
1987         else
1988                 netif_receive_skb(skb);
1989 }
1990
1991 /* Process an inbound completion from an rx ring. */
1992 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1993                                         struct rx_ring *rx_ring,
1994                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
1995 {
1996         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1997         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1998                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1999                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2000
2001         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2002
2003         /* Frame error, so drop the packet. */
2004         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006                 return (unsigned long)length;
2007         }
2008
2009         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2010                 /* The data and headers are split into
2011                  * separate buffers.
2012                  */
2013                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2014                                                 vlan_id);
2015         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2016                 /* The data fit in a single small buffer.
2017                  * Allocate a new skb, copy the data and
2018                  * return the buffer to the free pool.
2019                  */
2020                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2021                                                 length, vlan_id);
2022         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2023                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2024                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2025                 /* TCP packet in a page chunk that's been checksummed.
2026                  * Tack it on to our GRO skb and let it go.
2027                  */
2028                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2029                                                 length, vlan_id);
2030         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2031                 /* Non-TCP packet in a page chunk. Allocate an
2032                  * skb, tack the page onto its frags, and send it up.
2033                  */
2034                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2035                                                 length, vlan_id);
2036         } else {
2037                 /* Non-TCP/UDP large frames that span multiple buffers
2038                  * can be processed correctly by the split frame logic.
2039                  */
2040                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2041                                                 vlan_id);
2042         }
2043
2044         return (unsigned long)length;
2045 }
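
/*
 * The dispatch above boils down to a small classification over the
 * IOCB flag bits.  A simplified sketch (flag names abbreviated, taking
 * the already-tested bits as booleans):
 *
 *	enum rx_path { RX_SPLIT, RX_SMALL_COPY, RX_GRO_PAGE, RX_PAGE };
 *
 *	static enum rx_path classify(int hdr_split, int data_small,
 *				     int data_large, int tcp_csum_ok)
 *	{
 *		if (hdr_split)
 *			return RX_SPLIT;
 *		if (data_small)
 *			return RX_SMALL_COPY;
 *		if (data_large && tcp_csum_ok)
 *			return RX_GRO_PAGE;
 *		if (data_large)
 *			return RX_PAGE;
 *		return RX_SPLIT;	// multi-buffer fallback
 *	}
 */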
2046
2047 /* Process an outbound completion from an rx ring. */
2048 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2049                                    struct ob_mac_iocb_rsp *mac_rsp)
2050 {
2051         struct tx_ring *tx_ring;
2052         struct tx_ring_desc *tx_ring_desc;
2053
2054         QL_DUMP_OB_MAC_RSP(mac_rsp);
2055         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2056         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2057         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2058         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2059         tx_ring->tx_packets++;
2060         dev_kfree_skb(tx_ring_desc->skb);
2061         tx_ring_desc->skb = NULL;
2062
2063         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2064                                         OB_MAC_IOCB_RSP_S |
2065                                         OB_MAC_IOCB_RSP_L |
2066                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2067                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2068                         netif_warn(qdev, tx_done, qdev->ndev,
2069                                    "Total descriptor length did not match transfer length.\n");
2070                 }
2071                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2072                         netif_warn(qdev, tx_done, qdev->ndev,
2073                                    "Frame too short to be valid, not sent.\n");
2074                 }
2075                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2076                         netif_warn(qdev, tx_done, qdev->ndev,
2077                                    "Frame too long, but sent anyway.\n");
2078                 }
2079                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2080                         netif_warn(qdev, tx_done, qdev->ndev,
2081                                    "PCI backplane error. Frame not sent.\n");
2082                 }
2083         }
2084         atomic_inc(&tx_ring->tx_count);
2085 }
2086
2087 /* Fire up a handler to reset the MPI processor. */
2088 void ql_queue_fw_error(struct ql_adapter *qdev)
2089 {
2090         ql_link_off(qdev);
2091         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2092 }
2093
2094 void ql_queue_asic_error(struct ql_adapter *qdev)
2095 {
2096         ql_link_off(qdev);
2097         ql_disable_interrupts(qdev);
2098         /* Clear adapter up bit to signal the recovery
2099          * process that it shouldn't kill the reset worker
2100          * thread.
2101          */
2102         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2103         /* Set asic recovery bit to indicate to the reset process
2104          * that we are in fatal error recovery rather than a normal close.
2105          */
2106         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2107         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2108 }
2109
2110 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2111                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2112 {
2113         switch (ib_ae_rsp->event) {
2114         case MGMT_ERR_EVENT:
2115                 netif_err(qdev, rx_err, qdev->ndev,
2116                           "Management Processor Fatal Error.\n");
2117                 ql_queue_fw_error(qdev);
2118                 return;
2119
2120         case CAM_LOOKUP_ERR_EVENT:
2121                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2122                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2123                 ql_queue_asic_error(qdev);
2124                 return;
2125
2126         case SOFT_ECC_ERROR_EVENT:
2127                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2128                 ql_queue_asic_error(qdev);
2129                 break;
2130
2131         case PCI_ERR_ANON_BUF_RD:
2132                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2133                                         "anonymous buffers from rx_ring %d.\n",
2134                                         ib_ae_rsp->q_id);
2135                 ql_queue_asic_error(qdev);
2136                 break;
2137
2138         default:
2139                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2140                           ib_ae_rsp->event);
2141                 ql_queue_asic_error(qdev);
2142                 break;
2143         }
2144 }
2145
2146 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2147 {
2148         struct ql_adapter *qdev = rx_ring->qdev;
2149         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2150         struct ob_mac_iocb_rsp *net_rsp = NULL;
2151         int count = 0;
2152
2153         struct tx_ring *tx_ring;
2154         /* While there are entries in the completion queue. */
2155         while (prod != rx_ring->cnsmr_idx) {
2156
2157                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2158                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2159                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2160
2161                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2162                 rmb();
2163                 switch (net_rsp->opcode) {
2164
2165                 case OPCODE_OB_MAC_TSO_IOCB:
2166                 case OPCODE_OB_MAC_IOCB:
2167                         ql_process_mac_tx_intr(qdev, net_rsp);
2168                         break;
2169                 default:
2170                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2171                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2172                                      net_rsp->opcode);
2173                 }
2174                 count++;
2175                 ql_update_cq(rx_ring);
2176                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2177         }
2178         if (!net_rsp)
2179                 return 0;
2180         ql_write_cq_idx(rx_ring);
2181         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2182         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2183                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2184                         /*
2185                          * The queue got stopped because the tx_ring was full.
2186                          * Wake it up, because it's now at least 25% empty.
2187                          */
2188                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2189         }
2190
2191         return count;
2192 }
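
/*
 * The wake-up test above is a quarter-full threshold: with an assumed
 * wq_len of 128, a stopped queue is restarted once more than
 * 128 / 4 = 32 descriptors are free again.
 *
 *	static int should_wake(int free_count, int wq_len)
 *	{
 *		return free_count > wq_len / 4;
 *	}
 */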
2193
2194 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2195 {
2196         struct ql_adapter *qdev = rx_ring->qdev;
2197         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2198         struct ql_net_rsp_iocb *net_rsp;
2199         int count = 0;
2200
2201         /* While there are entries in the completion queue. */
2202         while (prod != rx_ring->cnsmr_idx) {
2203
2204                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2205                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2206                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2207
2208                 net_rsp = rx_ring->curr_entry;
2209                 rmb();
2210                 switch (net_rsp->opcode) {
2211                 case OPCODE_IB_MAC_IOCB:
2212                         ql_process_mac_rx_intr(qdev, rx_ring,
2213                                                (struct ib_mac_iocb_rsp *)
2214                                                net_rsp);
2215                         break;
2216
2217                 case OPCODE_IB_AE_IOCB:
2218                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2219                                                 net_rsp);
2220                         break;
2221                 default:
2222                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2223                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2224                                      net_rsp->opcode);
2225                         break;
2226                 }
2227                 count++;
2228                 ql_update_cq(rx_ring);
2229                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230                 if (count == budget)
2231                         break;
2232         }
2233         ql_update_buffer_queues(qdev, rx_ring);
2234         ql_write_cq_idx(rx_ring);
2235         return count;
2236 }
2237
2238 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2239 {
2240         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2241         struct ql_adapter *qdev = rx_ring->qdev;
2242         struct rx_ring *trx_ring;
2243         int i, work_done = 0;
2244         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2245
2246         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2247                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2248
2249         /* Service the TX rings first.  They start
2250          * right after the RSS rings. */
2251         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2252                 trx_ring = &qdev->rx_ring[i];
2253                 /* If this TX completion ring belongs to this vector and
2254                  * it's not empty then service it.
2255                  */
2256                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2257                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2258                                         trx_ring->cnsmr_idx)) {
2259                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2260                                      "%s: Servicing TX completion ring %d.\n",
2261                                      __func__, trx_ring->cq_id);
2262                         ql_clean_outbound_rx_ring(trx_ring);
2263                 }
2264         }
2265
2266         /*
2267          * Now service the RSS ring if it's active.
2268          */
2269         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2270                                         rx_ring->cnsmr_idx) {
2271                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2272                              "%s: Servicing RX completion ring %d.\n",
2273                              __func__, rx_ring->cq_id);
2274                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2275         }
2276
2277         if (work_done < budget) {
2278                 napi_complete(napi);
2279                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2280         }
2281         return work_done;
2282 }
2283
2284 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2285 {
2286         struct ql_adapter *qdev = netdev_priv(ndev);
2287
2288         if (features & NETIF_F_HW_VLAN_RX) {
2289                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2290                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2291         } else {
2292                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2293         }
2294 }
2295
2296 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2297         netdev_features_t features)
2298 {
2299         /*
2300          * Since there is no support for separate rx/tx vlan accel
2301          * enable/disable make sure tx flag is always in same state as rx.
2302          */
2303         if (features & NETIF_F_HW_VLAN_RX)
2304                 features |= NETIF_F_HW_VLAN_TX;
2305         else
2306                 features &= ~NETIF_F_HW_VLAN_TX;
2307
2308         return features;
2309 }
2310
2311 static int qlge_set_features(struct net_device *ndev,
2312         netdev_features_t features)
2313 {
2314         netdev_features_t changed = ndev->features ^ features;
2315
2316         if (changed & NETIF_F_HW_VLAN_RX)
2317                 qlge_vlan_mode(ndev, features);
2318
2319         return 0;
2320 }
2321
2322 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2323 {
2324         u32 enable_bit = MAC_ADDR_E;
2325         int err;
2326
2327         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2328                                   MAC_ADDR_TYPE_VLAN, vid);
2329         if (err)
2330                 netif_err(qdev, ifup, qdev->ndev,
2331                           "Failed to init vlan address.\n");
2332         return err;
2333 }
2334
2335 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2336 {
2337         struct ql_adapter *qdev = netdev_priv(ndev);
2338         int status;
2339         int err;
2340
2341         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2342         if (status)
2343                 return status;
2344
2345         err = __qlge_vlan_rx_add_vid(qdev, vid);
2346         set_bit(vid, qdev->active_vlans);
2347
2348         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2349
2350         return err;
2351 }
2352
2353 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2354 {
2355         u32 enable_bit = 0;
2356         int err;
2357
2358         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2359                                   MAC_ADDR_TYPE_VLAN, vid);
2360         if (err)
2361                 netif_err(qdev, ifup, qdev->ndev,
2362                           "Failed to clear vlan address.\n");
2363         return err;
2364 }
2365
2366 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2367 {
2368         struct ql_adapter *qdev = netdev_priv(ndev);
2369         int status;
2370         int err;
2371
2372         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373         if (status)
2374                 return status;
2375
2376         err = __qlge_vlan_rx_kill_vid(qdev, vid);
2377         clear_bit(vid, qdev->active_vlans);
2378
2379         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2380
2381         return err;
2382 }
2383
2384 static void qlge_restore_vlan(struct ql_adapter *qdev)
2385 {
2386         int status;
2387         u16 vid;
2388
2389         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2390         if (status)
2391                 return;
2392
2393         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2394                 __qlge_vlan_rx_add_vid(qdev, vid);
2395
2396         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2397 }
2398
2399 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2400 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2401 {
2402         struct rx_ring *rx_ring = dev_id;
2403         napi_schedule(&rx_ring->napi);
2404         return IRQ_HANDLED;
2405 }
2406
2407 /* This handles a fatal error, MPI activity, and the default
2408  * rx_ring in an MSI-X multiple vector environment.
2409  * In MSI/Legacy environment it also process the rest of
2410  * In an MSI/Legacy environment it also processes the rest of
2411  */
2412 static irqreturn_t qlge_isr(int irq, void *dev_id)
2413 {
2414         struct rx_ring *rx_ring = dev_id;
2415         struct ql_adapter *qdev = rx_ring->qdev;
2416         struct intr_context *intr_context = &qdev->intr_context[0];
2417         u32 var;
2418         int work_done = 0;
2419
2420         spin_lock(&qdev->hw_lock);
2421         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2422                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2423                              "Shared Interrupt, Not ours!\n");
2424                 spin_unlock(&qdev->hw_lock);
2425                 return IRQ_NONE;
2426         }
2427         spin_unlock(&qdev->hw_lock);
2428
2429         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2430
2431         /*
2432          * Check for fatal error.
2433          */
2434         if (var & STS_FE) {
2435                 ql_queue_asic_error(qdev);
2436                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2437                 var = ql_read32(qdev, ERR_STS);
2438                 netdev_err(qdev->ndev, "Resetting chip. "
2439                                         "Error Status Register = 0x%x\n", var);
2440                 return IRQ_HANDLED;
2441         }
2442
2443         /*
2444          * Check MPI processor activity.
2445          */
2446         if ((var & STS_PI) &&
2447                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2448                 /*
2449                  * We've got an async event or mailbox completion.
2450                  * Handle it and clear the source of the interrupt.
2451                  */
2452                 netif_err(qdev, intr, qdev->ndev,
2453                           "Got MPI processor interrupt.\n");
2454                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2455                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2456                 queue_delayed_work_on(smp_processor_id(),
2457                                 qdev->workqueue, &qdev->mpi_work, 0);
2458                 work_done++;
2459         }
2460
2461         /*
2462          * Get the bit-mask that shows the active queues for this
2463          * pass.  Compare it to the queues that this irq services
2464          * and call napi if there's a match.
2465          */
2466         var = ql_read32(qdev, ISR1);
2467         if (var & intr_context->irq_mask) {
2468                 netif_info(qdev, intr, qdev->ndev,
2469                            "Waking handler for rx_ring[0].\n");
2470                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2471                 napi_schedule(&rx_ring->napi);
2472                 work_done++;
2473         }
2474         ql_enable_completion_interrupt(qdev, intr_context->intr);
2475         return work_done ? IRQ_HANDLED : IRQ_NONE;
2476 }
2477
2478 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2479 {
2480
2481         if (skb_is_gso(skb)) {
2482                 int err;
2483                 if (skb_header_cloned(skb)) {
2484                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2485                         if (err)
2486                                 return err;
2487                 }
2488
2489                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2490                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2491                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2492                 mac_iocb_ptr->total_hdrs_len =
2493                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2494                 mac_iocb_ptr->net_trans_offset =
2495                     cpu_to_le16(skb_network_offset(skb) |
2496                                 skb_transport_offset(skb)
2497                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2498                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2499                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2500                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2501                         struct iphdr *iph = ip_hdr(skb);
2502                         iph->check = 0;
2503                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2504                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2505                                                                  iph->daddr, 0,
2506                                                                  IPPROTO_TCP,
2507                                                                  0);
2508                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2509                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2510                         tcp_hdr(skb)->check =
2511                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2512                                              &ipv6_hdr(skb)->daddr,
2513                                              0, IPPROTO_TCP, 0);
2514                 }
2515                 return 1;
2516         }
2517         return 0;
2518 }
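
/*
 * Worked example for the header-length fields set up in ql_tso() above,
 * assuming an untagged Ethernet + IPv4 + option-less TCP frame: the
 * network header starts at offset 14, the transport header at
 * 14 + 20 = 34, tcp_hdrlen() is 20, so total_hdrs_len is 34 + 20 = 54,
 * and the packed offset word combines the two offsets.
 *
 *	static unsigned int pack_offsets(unsigned int net_off,
 *					 unsigned int trans_off,
 *					 unsigned int trans_shift)
 *	{
 *		// trans_shift stands in for OB_MAC_TRANSPORT_HDR_SHIFT
 *		return net_off | (trans_off << trans_shift);
 *	}
 */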
2519
2520 static void ql_hw_csum_setup(struct sk_buff *skb,
2521                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2522 {
2523         int len;
2524         struct iphdr *iph = ip_hdr(skb);
2525         __sum16 *check;
2526         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2527         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2528         mac_iocb_ptr->net_trans_offset =
2529                 cpu_to_le16(skb_network_offset(skb) |
2530                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2531
2532         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2534         if (likely(iph->protocol == IPPROTO_TCP)) {
2535                 check = &(tcp_hdr(skb)->check);
2536                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2537                 mac_iocb_ptr->total_hdrs_len =
2538                     cpu_to_le16(skb_transport_offset(skb) +
2539                                 (tcp_hdr(skb)->doff << 2));
2540         } else {
2541                 check = &(udp_hdr(skb)->check);
2542                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2543                 mac_iocb_ptr->total_hdrs_len =
2544                     cpu_to_le16(skb_transport_offset(skb) +
2545                                 sizeof(struct udphdr));
2546         }
2547         *check = ~csum_tcpudp_magic(iph->saddr,
2548                                     iph->daddr, len, iph->protocol, 0);
2549 }
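
/*
 * The length folded into the pseudo-header seed above is just the L4
 * payload of the IP datagram: for example, with tot_len = 1500 and
 * ihl = 5, it is 1500 - (5 << 2) = 1480 bytes.
 *
 *	static int l4_len(int tot_len, int ihl)
 *	{
 *		return tot_len - (ihl << 2);
 *	}
 */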
2550
2551 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2552 {
2553         struct tx_ring_desc *tx_ring_desc;
2554         struct ob_mac_iocb_req *mac_iocb_ptr;
2555         struct ql_adapter *qdev = netdev_priv(ndev);
2556         int tso;
2557         struct tx_ring *tx_ring;
2558         u32 tx_ring_idx = (u32) skb->queue_mapping;
2559
2560         tx_ring = &qdev->tx_ring[tx_ring_idx];
2561
2562         if (skb_padto(skb, ETH_ZLEN))
2563                 return NETDEV_TX_OK;
2564
2565         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2566                 netif_info(qdev, tx_queued, qdev->ndev,
2567                            "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2568                            __func__, tx_ring_idx);
2569                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2570                 tx_ring->tx_errors++;
2571                 return NETDEV_TX_BUSY;
2572         }
2573         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2574         mac_iocb_ptr = tx_ring_desc->queue_entry;
2575         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2576
2577         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2578         mac_iocb_ptr->tid = tx_ring_desc->index;
2579         /* We use the upper 32 bits to store the tx queue for this I/O.
2580          * When we get the completion we can use it to establish the context.
2581          */
2582         mac_iocb_ptr->txq_idx = tx_ring_idx;
2583         tx_ring_desc->skb = skb;
2584
2585         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2586
2587         if (vlan_tx_tag_present(skb)) {
2588                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2590                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2591                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2592         }
2593         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2594         if (tso < 0) {
2595                 dev_kfree_skb_any(skb);
2596                 return NETDEV_TX_OK;
2597         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2598                 ql_hw_csum_setup(skb,
2599                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2600         }
2601         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2602                         NETDEV_TX_OK) {
2603                 netif_err(qdev, tx_queued, qdev->ndev,
2604                           "Could not map the segments.\n");
2605                 tx_ring->tx_errors++;
2606                 return NETDEV_TX_BUSY;
2607         }
2608         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2609         tx_ring->prod_idx++;
2610         if (tx_ring->prod_idx == tx_ring->wq_len)
2611                 tx_ring->prod_idx = 0;
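        /* Make sure the IOCB writes have reached memory before ringing
         * the doorbell below.
         */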
2612         wmb();
2613
2614         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2615         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2616                      "tx queued, slot %d, len %d\n",
2617                      tx_ring->prod_idx, skb->len);
2618
2619         atomic_dec(&tx_ring->tx_count);
2620
2621         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2622                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2623                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2624                         /*
2625                          * Completions may have freed entries since the check
2626                          * above; over 25% of the ring is now free, so wake it up.
2627                          */
2628                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2629         }
2630         return NETDEV_TX_OK;
2631 }
2632
2633
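/* Free the one-page shadow register areas used by the rx and tx rings. */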
2634 static void ql_free_shadow_space(struct ql_adapter *qdev)
2635 {
2636         if (qdev->rx_ring_shadow_reg_area) {
2637                 pci_free_consistent(qdev->pdev,
2638                                     PAGE_SIZE,
2639                                     qdev->rx_ring_shadow_reg_area,
2640                                     qdev->rx_ring_shadow_reg_dma);
2641                 qdev->rx_ring_shadow_reg_area = NULL;
2642         }
2643         if (qdev->tx_ring_shadow_reg_area) {
2644                 pci_free_consistent(qdev->pdev,
2645                                     PAGE_SIZE,
2646                                     qdev->tx_ring_shadow_reg_area,
2647                                     qdev->tx_ring_shadow_reg_dma);
2648                 qdev->tx_ring_shadow_reg_area = NULL;
2649         }
2650 }
2651
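/* Allocate one page of coherent DMA memory for the rx shadow registers
 * and one for the tx shadow registers.  Returns 0 or -ENOMEM.
 */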
2652 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2653 {
2654         qdev->rx_ring_shadow_reg_area =
2655             pci_alloc_consistent(qdev->pdev,
2656                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2657         if (qdev->rx_ring_shadow_reg_area == NULL) {
2658                 netif_err(qdev, ifup, qdev->ndev,
2659                           "Allocation of RX shadow space failed.\n");
2660                 return -ENOMEM;
2661         }
2662         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2663         qdev->tx_ring_shadow_reg_area =
2664             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2665                                  &qdev->tx_ring_shadow_reg_dma);
2666         if (qdev->tx_ring_shadow_reg_area == NULL) {
2667                 netif_err(qdev, ifup, qdev->ndev,
2668                           "Allocation of TX shadow space failed.\n");
2669                 goto err_wqp_sh_area;
2670         }
2671         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2672         return 0;
2673
2674 err_wqp_sh_area:
2675         pci_free_consistent(qdev->pdev,
2676                             PAGE_SIZE,
2677                             qdev->rx_ring_shadow_reg_area,
2678                             qdev->rx_ring_shadow_reg_dma);
2679         return -ENOMEM;
2680 }
2681
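/* Point each tx descriptor at its IOCB slot in the work queue and
 * reset the free-entry count to the full ring size.
 */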
2682 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2683 {
2684         struct tx_ring_desc *tx_ring_desc;
2685         int i;
2686         struct ob_mac_iocb_req *mac_iocb_ptr;
2687
2688         mac_iocb_ptr = tx_ring->wq_base;
2689         tx_ring_desc = tx_ring->q;
2690         for (i = 0; i < tx_ring->wq_len; i++) {
2691                 tx_ring_desc->index = i;
2692                 tx_ring_desc->skb = NULL;
2693                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2694                 mac_iocb_ptr++;
2695                 tx_ring_desc++;
2696         }
2697         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2698 }
2699
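/* Free the work queue DMA memory and the descriptor array for a tx ring. */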
2700 static void ql_free_tx_resources(struct ql_adapter *qdev,
2701                                  struct tx_ring *tx_ring)
2702 {
2703         if (tx_ring->wq_base) {
2704                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2705                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2706                 tx_ring->wq_base = NULL;
2707         }
2708         kfree(tx_ring->q);
2709         tx_ring->q = NULL;
2710 }
2711
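/* Allocate the work queue DMA memory, which must satisfy the hardware's
 * address alignment requirement, and the descriptor array for a tx ring.
 */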
2712 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2713                                  struct tx_ring *tx_ring)
2714 {
2715         tx_ring->wq_base =
2716             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2717                                  &tx_ring->wq_base_dma);
2718
2719         if ((tx_ring->wq_base == NULL) ||
2720             tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2721                 goto pci_alloc_err;
2722
2723         tx_ring->q =
2724             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2725         if (tx_ring->q == NULL)
2726                 goto err;
2727
2728         return 0;
2729 err:
2730         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2731                             tx_ring->wq_base, tx_ring->wq_base_dma);
2732         tx_ring->wq_base = NULL;
2733 pci_alloc_err:
2734         netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2735         return -ENOMEM;
2736 }
2737
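/* Release the page chunks still held by the large buffer queue,
 * unmapping a DMA block only when the chunk flagged as the last
 * piece of that block is freed.
 */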
2738 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2739 {
2740         struct bq_desc *lbq_desc;
2741
2742         uint32_t  curr_idx, clean_idx;
2743
2744         curr_idx = rx_ring->lbq_curr_idx;
2745         clean_idx = rx_ring->lbq_clean_idx;
2746         while (curr_idx != clean_idx) {
2747                 lbq_desc = &rx_ring->lbq[curr_idx];
2748
2749                 if (lbq_desc->p.pg_chunk.last_flag) {
2750                         pci_unmap_page(qdev->pdev,
2751                                        lbq_desc->p.pg_chunk.map,
2752                                        ql_lbq_block_size(qdev),
2753                                        PCI_DMA_FROMDEVICE);
2754                         lbq_desc->p.pg_chunk.last_flag = 0;
2755                 }
2756
2757                 put_page(lbq_desc->p.pg_chunk.page);
2758                 lbq_desc->p.pg_chunk.page = NULL;
2759
2760                 if (++curr_idx == rx_ring->lbq_len)
2761                         curr_idx = 0;
2762
2763         }
2764 }
2765
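/* Unmap and free any skbs still parked in the small buffer queue. */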
2766 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2767 {
2768         int i;
2769         struct bq_desc *sbq_desc;
2770
2771         for (i = 0; i < rx_ring->sbq_len; i++) {
2772                 sbq_desc = &rx_ring->sbq[i];
2773                 if (sbq_desc == NULL) {
2774                         netif_err(qdev, ifup, qdev->ndev,
2775                                   "sbq_desc %d is NULL.\n", i);
2776                         return;
2777                 }
2778                 if (sbq_desc->p.skb) {
2779                         pci_unmap_single(qdev->pdev,
2780                                          dma_unmap_addr(sbq_desc, mapaddr),
2781                                          dma_unmap_len(sbq_desc, maplen),
2782                                          PCI_DMA_FROMDEVICE);
2783                         dev_kfree_skb(sbq_desc->p.skb);
2784                         sbq_desc->p.skb = NULL;
2785                 }
2786         }
2787 }
2788
2789 /* Free all large and small rx buffers associated
2790  * with the completion queues for this device.
2791  */
2792 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2793 {
2794         int i;
2795         struct rx_ring *rx_ring;
2796
2797         for (i = 0; i < qdev->rx_ring_count; i++) {
2798                 rx_ring = &qdev->rx_ring[i];
2799                 if (rx_ring->lbq)
2800                         ql_free_lbq_buffers(qdev, rx_ring);
2801                 if (rx_ring->sbq)
2802                         ql_free_sbq_buffers(qdev, rx_ring);
2803         }
2804 }
2805
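/* Repopulate the small and large buffer queues for every rx
 * completion ring; tx completion rings have no buffer queues.
 */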
2806 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2807 {
2808         struct rx_ring *rx_ring;
2809         int i;
2810
2811         for (i = 0; i < qdev->rx_ring_count; i++) {
2812                 rx_ring = &qdev->rx_ring[i];
2813                 if (rx_ring->type != TX_Q)
2814                         ql_update_buffer_queues(qdev, rx_ring);
2815         }
2816 }
2817
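/* Zero the large buffer queue descriptors and point each one at its
 * slot in the queue's block of DMA buffer addresses.
 */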
2818 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2819                                 struct rx_ring *rx_ring)
2820 {
2821         int i;
2822         struct bq_desc *lbq_desc;
2823         __le64 *bq = rx_ring->lbq_base;
2824
2825         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2826         for (i = 0; i < rx_ring->lbq_len; i++) {
2827                 lbq_desc = &rx_ring->lbq[i];
2828                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2829                 lbq_desc->index = i;
2830                 lbq_desc->addr = bq;
2831                 bq++;
2832         }
2833 }
2834
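/* Zero the small buffer queue descriptors and point each one at its
 * slot in the queue's block of DMA buffer addresses.
 */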
2835 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2836                                 struct rx_ring *rx_ring)
2837 {
2838         int i;
2839         struct bq_desc *sbq_desc;
2840         __le64 *bq = rx_ring->sbq_base;
2841
2842         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2843         for (i = 0; i < rx_ring->sbq_len; i++) {
2844                 sbq_desc = &rx_ring->sbq[i];
2845                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2846                 sbq_desc->index = i;
2847                 sbq_desc->addr = bq;
2848                 bq++;
2849         }
2850 }
2851
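/* Free the small buffer queue, the large buffer queue, their control
 * blocks, and the completion queue itself for this rx_ring.
 */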
2852 static void ql_free_rx_resources(struct ql_adapter *qdev,
2853                                  struct rx_ring *rx_ring)
2854 {
2855         /* Free the small buffer queue. */
2856         if (rx_ring->sbq_base) {
2857                 pci_free_consistent(qdev->pdev,
2858                                     rx_ring->sbq_size,
2859                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2860                 rx_ring->sbq_base = NULL;
2861         }
2862
2863         /* Free the small buffer queue control blocks. */
2864         kfree(rx_ring->sbq);
2865         rx_ring->sbq = NULL;
2866
2867         /* Free the large buffer queue. */
2868         if (rx_ring->lbq_base) {
2869                 pci_free_consistent(qdev->pdev,
2870                                     rx_ring->lbq_size,
2871                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2872                 rx_ring->lbq_base = NULL;
2873         }
2874
2875         /* Free the large buffer queue control blocks. */
2876         kfree(rx_ring->lbq);
2877         rx_ring->lbq = NULL;
2878
2879         /* Free the rx queue. */
2880         if (rx_ring->cq_base) {
2881                 pci_free_consistent(qdev->pdev,
2882                                     rx_ring->cq_size,
2883                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2884                 rx_ring->cq_base = NULL;
2885         }
2886 }
2887
2888 /* Allocate queues and buffers for this completion queue based
2889  * on the values in the parameter structure. */
2890 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2891                                  struct rx_ring *rx_ring)
2892 {
2893
2894         /*
2895          * Allocate the completion queue for this rx_ring.
2896          */
2897         rx_ring->cq_base =
2898             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2899                                  &rx_ring->cq_base_dma);