/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
28 #include <linux/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
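/* Example usage, assuming the module is built as "qlge":
 *
 *     modprobe qlge debug=5 qlge_irq_type=1
 *
 * loads the driver with MSI interrupts and an explicit debug level
 * instead of the defaults above.
 */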
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. "
		 "Default is OFF - do not allocate memory.");
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow forcing a firmware core dump. "
		 "Default is OFF - do not allow.");
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCoE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
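
/* Acquire the hardware semaphore, retrying briefly in case another
 * function (NIC, MPI or FC firmware) currently holds it.  Returns 0
 * on success.
 */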
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly during the
 * initialization process, but is also used from kernel-thread
 * contexts such as netdev->set_multi, netdev->set_mac_address,
 * and netdev->vlan_rx_add_vid.
 */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169 int count = UDELAY_COUNT;
172 temp = ql_read32(qdev, reg);
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
			   "register 0x%.08x access error, value = 0x%.08x!\n",
180 } else if (temp & bit)
182 udelay(UDELAY_DELAY);
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 int count = UDELAY_COUNT;
199 temp = ql_read32(qdev, CFG);
204 udelay(UDELAY_DELAY);
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
237 status = ql_wait_cfg(qdev, bit);
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
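	/* Hand the DMA address of the control block to the chip
	 * (low dword, then high dword) before triggering the download.
	 */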
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
252 * Wait for the bit to clear after signaling hw.
254 status = ql_wait_cfg(qdev, bit);
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
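		/* CAM entries carry a third word holding the CAM
		 * output (routing) bits, so one more dword is read
		 * below for MAC_ADDR_TYPE_CAM_MAC.
		 */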
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
329 /* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
374 u32 upper = (addr[0] << 8) | addr[1];
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
379 ql_wait_reg_rdy(qdev,
380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 ql_write32(qdev, MAC_ADDR_DATA, lower);
388 ql_wait_reg_rdy(qdev,
389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
392 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 ql_write32(qdev, MAC_ADDR_DATA, upper);
397 ql_wait_reg_rdy(qdev,
398 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
401 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
402 (index << MAC_ADDR_IDX_SHIFT) | /* index */
404 /* This field should also include the queue id
405 and possibly the function id. Right now we hardcode
406 the route field to NIC core.
408 cam_output = (CAM_OUT_ROUTE_NIC |
410 func << CAM_OUT_FUNC_SHIFT) |
411 (0 << CAM_OUT_CQ_ID_SHIFT));
412 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413 cam_output |= CAM_OUT_RV;
414 /* route to NIC core */
415 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
418 case MAC_ADDR_TYPE_VLAN:
420 u32 enable_bit = *((u32 *) &addr[0]);
421 /* For VLAN, the addr actually holds a bit that
422 * either enables or disables the vlan id we are
423 * addressing. It's either MAC_ADDR_E on or off.
424 * That's bit-27 we're talking about.
427 ql_wait_reg_rdy(qdev,
428 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
431 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432 (index << MAC_ADDR_IDX_SHIFT) | /* index */
434 enable_bit); /* enable/disable */
437 case MAC_ADDR_TYPE_MULTI_FLTR:
439 netif_crit(qdev, ifup, qdev->ndev,
440 "Address type %d not yet supported.\n", type);
447 /* Set or clear MAC address in hardware. We sometimes
448 * have to clear it to prevent wrong frame routing
449 * especially in a bonding environment.
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
454 char zero_mac_addr[ETH_ALEN];
458 addr = &qdev->current_mac_addr[0];
459 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 "Set Mac addr %pM\n", addr);
462 memset(zero_mac_addr, 0, ETH_ALEN);
463 addr = &zero_mac_addr[0];
464 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 "Clearing MAC address\n");
467 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
470 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 netif_err(qdev, ifup, qdev->ndev,
475 "Failed to init mac address.\n");
479 void ql_link_on(struct ql_adapter *qdev)
481 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482 netif_carrier_on(qdev->ndev);
483 ql_set_mac_addr(qdev, 1);
486 void ql_link_off(struct ql_adapter *qdev)
488 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
493 /* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
509 *value = ql_read32(qdev, RT_DATA);
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
522 int status = -EINVAL; /* Return error if no mask match. */
528 value = RT_IDX_DST_CAM_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
533 case RT_IDX_VALID: /* Promiscuous Mode frames. */
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
540 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
547 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_IP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
555 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 RT_IDX_IDX_SHIFT); /* index */
563 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
570 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
572 value = RT_IDX_DST_DFLT_Q | /* dest */
573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
577 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
584 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
586 value = RT_IDX_DST_RSS | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
591 case 0: /* Clear the E-bit on an entry. */
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (index << RT_IDX_IDX_SHIFT);/* index */
599 netif_err(qdev, ifup, qdev->ndev,
600 "Mask type %d not yet supported.\n", mask);
606 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
609 value |= (enable ? RT_IDX_E : 0);
610 ql_write32(qdev, RT_IDX, value);
611 ql_write32(qdev, RT_DATA, enable ? mask : 0);
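
/* The upper 16 bits of INTR_EN act as a write mask for the lower 16,
 * so writing INTR_EN_EI in both halves sets the master interrupt
 * enable while writing only the mask half clears it.
 */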
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
619 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
624 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628 * Otherwise, we may have multiple outstanding workers and don't want to
629 * enable until the last one finishes. In this case, the irq_cnt gets
630 * incremented every time we queue a worker and decremented every time
631 * a worker finishes. Once it hits zero we enable the interrupt.
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
636 unsigned long hw_flags = 0;
637 struct intr_context *ctx = qdev->intr_context + intr;
639 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640 /* Always enable if we're MSIX multi interrupts and
641 * it's not the default (zeroeth) interrupt.
643 ql_write32(qdev, INTR_EN,
645 var = ql_read32(qdev, STS);
649 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 ql_write32(qdev, INTR_EN,
653 var = ql_read32(qdev, STS);
655 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
662 struct intr_context *ctx;
664 /* HW disables for us if we're MSIX multi interrupts and
665 * it's not the default (zeroeth) interrupt.
667 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
670 ctx = qdev->intr_context + intr;
671 spin_lock(&qdev->hw_lock);
672 if (!atomic_read(&ctx->irq_cnt)) {
673 ql_write32(qdev, INTR_EN,
675 var = ql_read32(qdev, STS);
677 atomic_inc(&ctx->irq_cnt);
678 spin_unlock(&qdev->hw_lock);
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
685 for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
690 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 ql_enable_completion_interrupt(qdev, i);
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
702 __le16 *flash = (__le16 *)&qdev->flash;
704 status = strncmp((char *)&qdev->flash, str, 4);
706 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
710 for (i = 0; i < size; i++)
711 csum += le16_to_cpu(*flash++);
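	/* A valid flash image sums to zero over its 16-bit words. */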
714 netif_err(qdev, ifup, qdev->ndev,
715 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
723 /* wait for reg to come ready */
724 status = ql_wait_reg_rdy(qdev,
725 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
728 /* set up for reg read */
729 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 /* wait for reg to come ready */
731 status = ql_wait_reg_rdy(qdev,
732 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
735 /* This data is stored on flash as an array of
736 * __le32. Since ql_read32() returns cpu endian
737 * we need to swap it back.
739 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
748 __le32 *p = (__le32 *)&qdev->flash;
	/* Get flash offset for this function and adjust
	 * for dword access.
	 */
756 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
763 size = sizeof(struct flash_params_8000) / sizeof(u32);
764 for (i = 0; i < size; i++, p++) {
765 status = ql_read_flash_word(qdev, i+offset, p);
767 netif_err(qdev, ifup, qdev->ndev,
768 "Error reading flash.\n");
773 status = ql_validate_flash(qdev,
774 sizeof(struct flash_params_8000) / sizeof(u16),
777 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
	/* Extract either the manufacturer or BOFM modified
	 * MAC address.
	 */
785 if (qdev->flash.flash_params_8000.data_type1 == 2)
787 qdev->flash.flash_params_8000.mac_addr1,
788 qdev->ndev->addr_len);
791 qdev->flash.flash_params_8000.mac_addr,
792 qdev->ndev->addr_len);
794 if (!is_valid_ether_addr(mac_addr)) {
795 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
800 memcpy(qdev->ndev->dev_addr,
802 qdev->ndev->addr_len);
805 ql_sem_unlock(qdev, SEM_FLASH_MASK);
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
813 __le32 *p = (__le32 *)&qdev->flash;
815 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
	/* Second function's parameters follow the first
	 * function's.
	 */
823 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
826 for (i = 0; i < size; i++, p++) {
827 status = ql_read_flash_word(qdev, i+offset, p);
829 netif_err(qdev, ifup, qdev->ndev,
830 "Error reading flash.\n");
836 status = ql_validate_flash(qdev,
837 sizeof(struct flash_params_8012) / sizeof(u16),
840 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
845 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 memcpy(qdev->ndev->dev_addr,
851 qdev->flash.flash_params_8012.mac_addr,
852 qdev->ndev->addr_len);
855 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
866 /* wait for reg to come ready */
867 status = ql_wait_reg_rdy(qdev,
868 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
871 /* write the data to the data reg */
872 ql_write32(qdev, XGMAC_DATA, data);
873 /* trigger the write */
874 ql_write32(qdev, XGMAC_ADDR, reg);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
885 /* wait for reg to come ready */
886 status = ql_wait_reg_rdy(qdev,
887 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
890 /* set up for reg read */
891 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 /* wait for reg to come ready */
893 status = ql_wait_reg_rdy(qdev,
894 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
898 *data = ql_read32(qdev, XGMAC_DATA);
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
910 status = ql_read_xgmac_reg(qdev, reg, &lo);
914 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
918 *data = (u64) lo | ((u64) hi << 32);
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
928 * Get MPI firmware version for driver banner
931 status = ql_mb_about_fw(qdev);
934 status = ql_mb_get_fw_state(qdev);
937 /* Wake up a worker to get/set the TX/RX frame sizes. */
938 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
954 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955 /* Another function has the semaphore, so
956 * wait for the port init bit to come ready.
958 netif_info(qdev, link, qdev->ndev,
959 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 netif_crit(qdev, link, qdev->ndev,
963 "Port initialize timed out.\n");
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
969 /* Set the core reset. */
970 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
973 data |= GLOBAL_CFG_RESET;
974 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
978 /* Clear the core reset and turn on jumbo for receiver. */
979 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
980 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
981 data |= GLOBAL_CFG_TX_STAT_EN;
982 data |= GLOBAL_CFG_RX_STAT_EN;
983 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	/* Enable the transmitter and clear its reset. */
988 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
991 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
992 data |= TX_CFG_EN; /* Enable the transmitter. */
993 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	/* Enable the receiver and clear its reset. */
998 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1001 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1002 data |= RX_CFG_EN; /* Enable the receiver. */
1003 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1007 /* Turn on jumbo. */
1009 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1013 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1017 /* Signal to the world that the port is enabled. */
1018 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020 ql_sem_unlock(qdev, qdev->xg_sem_mask);
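
/* The large buffer queue carves its receive buffers out of a single
 * "master" page of PAGE_SIZE << lbq_buf_order bytes.
 */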
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026 return PAGE_SIZE << qdev->lbq_buf_order;
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033 rx_ring->lbq_curr_idx++;
1034 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035 rx_ring->lbq_curr_idx = 0;
1036 rx_ring->lbq_free_cnt++;
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 struct rx_ring *rx_ring)
1043 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045 pci_dma_sync_single_for_cpu(qdev->pdev,
1046 dma_unmap_addr(lbq_desc, mapaddr),
1047 rx_ring->lbq_buf_size,
1048 PCI_DMA_FROMDEVICE);
	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
1053 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054 == ql_lbq_block_size(qdev))
1055 pci_unmap_page(qdev->pdev,
1056 lbq_desc->p.pg_chunk.map,
1057 ql_lbq_block_size(qdev),
1058 PCI_DMA_FROMDEVICE);
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066 rx_ring->sbq_curr_idx++;
1067 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068 rx_ring->sbq_curr_idx = 0;
1069 rx_ring->sbq_free_cnt++;
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1076 rx_ring->cnsmr_idx++;
1077 rx_ring->curr_entry++;
1078 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079 rx_ring->cnsmr_idx = 0;
1080 rx_ring->curr_entry = rx_ring->cq_base;
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 struct bq_desc *lbq_desc)
1092 if (!rx_ring->pg_chunk.page) {
1094 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 netif_err(qdev, drv, qdev->ndev,
1110 "PCI mapping failed.\n");
1113 rx_ring->pg_chunk.map = map;
1114 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117 /* Copy the current master pg_chunk info
1118 * to the current descriptor.
1120 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
	/* Adjust the master page chunk for the next
	 * buffer get.
	 */
1125 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127 rx_ring->pg_chunk.page = NULL;
1128 lbq_desc->p.pg_chunk.last_flag = 1;
1130 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131 get_page(rx_ring->pg_chunk.page);
1132 lbq_desc->p.pg_chunk.last_flag = 0;
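	/* Note: the get_page() above keeps the master page alive while
	 * earlier chunks of it are still owned by lbq descriptors; the
	 * chunk that exhausts the page inherits the original reference
	 * and is marked with last_flag.
	 */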
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139 u32 clean_idx = rx_ring->lbq_clean_idx;
1140 u32 start_idx = clean_idx;
1141 struct bq_desc *lbq_desc;
1145 while (rx_ring->lbq_free_cnt > 32) {
1146 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148 "lbq: try cleaning clean_idx = %d.\n",
1150 lbq_desc = &rx_ring->lbq[clean_idx];
1151 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 rx_ring->lbq_clean_idx = clean_idx;
1153 netif_err(qdev, ifup, qdev->ndev,
1154 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1159 map = lbq_desc->p.pg_chunk.map +
1160 lbq_desc->p.pg_chunk.offset;
1161 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162 dma_unmap_len_set(lbq_desc, maplen,
1163 rx_ring->lbq_buf_size);
1164 *lbq_desc->addr = cpu_to_le64(map);
1166 pci_dma_sync_single_for_device(qdev->pdev, map,
1167 rx_ring->lbq_buf_size,
1168 PCI_DMA_FROMDEVICE);
1170 if (clean_idx == rx_ring->lbq_len)
1174 rx_ring->lbq_clean_idx = clean_idx;
1175 rx_ring->lbq_prod_idx += 16;
1176 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177 rx_ring->lbq_prod_idx = 0;
1178 rx_ring->lbq_free_cnt -= 16;
1181 if (start_idx != clean_idx) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: updating prod idx = %d.\n",
1184 rx_ring->lbq_prod_idx);
1185 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186 rx_ring->lbq_prod_idx_db_reg);
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193 u32 clean_idx = rx_ring->sbq_clean_idx;
1194 u32 start_idx = clean_idx;
1195 struct bq_desc *sbq_desc;
1199 while (rx_ring->sbq_free_cnt > 16) {
1200 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201 sbq_desc = &rx_ring->sbq[clean_idx];
1202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203 "sbq: try cleaning clean_idx = %d.\n",
1205 if (sbq_desc->p.skb == NULL) {
1206 netif_printk(qdev, rx_status, KERN_DEBUG,
1208 "sbq: getting new skb for index %d.\n",
1211 netdev_alloc_skb(qdev->ndev,
1213 if (sbq_desc->p.skb == NULL) {
1214 netif_err(qdev, probe, qdev->ndev,
1215 "Couldn't get an skb.\n");
1216 rx_ring->sbq_clean_idx = clean_idx;
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1239 if (clean_idx == rx_ring->sbq_len)
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
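
/* Replenish both buffer queues.  Called from the RX completion path
 * once received buffers have been handed up the stack.
 */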
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1286 "unmapping OAL area.\n");
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1291 dma_unmap_len(&tx_ring_desc->map[i],
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
1307 /* Map the buffers for this transmit. This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1314 int len = skb_headlen(skb);
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1325 * Map the skb buffer first.
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1334 return NETDEV_TX_BUSY;
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in the OAL.
	 */
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1384 tbd->addr = cpu_to_le64(map);
1386 * The length is the number of fragments
1387 * that remain to be mapped times the length
1388 * of our sglist (OAL).
1391 cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1439 struct nic_stats *stats = &qdev->nic_stats;
1441 stats->rx_err_count++;
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445 stats->rx_code_err++;
1447 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448 stats->rx_oversize_err++;
1450 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451 stats->rx_undersize_err++;
1453 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454 stats->rx_preamble_err++;
1456 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457 stats->rx_frame_len_err++;
1459 case IB_MAC_IOCB_RSP_ERR_CRC:
1460 stats->rx_crc_err++;
1466 /* Process an inbound completion from an rx ring. */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468 struct rx_ring *rx_ring,
1469 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 struct sk_buff *skb;
1474 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475 struct napi_struct *napi = &rx_ring->napi;
1477 napi->dev = qdev->ndev;
1479 skb = napi_get_frags(napi);
1481 netif_err(qdev, drv, qdev->ndev,
1482 "Couldn't get an skb, exiting.\n");
1483 rx_ring->rx_dropped++;
1484 put_page(lbq_desc->p.pg_chunk.page);
1487 prefetch(lbq_desc->p.pg_chunk.va);
1488 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1489 lbq_desc->p.pg_chunk.page,
1490 lbq_desc->p.pg_chunk.offset,
1494 skb->data_len += length;
1495 skb->truesize += length;
1496 skb_shinfo(skb)->nr_frags++;
1498 rx_ring->rx_packets++;
1499 rx_ring->rx_bytes += length;
1500 skb->ip_summed = CHECKSUM_UNNECESSARY;
1501 skb_record_rx_queue(skb, rx_ring->cq_id);
1502 if (vlan_id != 0xffff)
1503 __vlan_hwaccel_put_tag(skb, vlan_id);
1504 napi_gro_frags(napi);
1507 /* Process an inbound completion from an rx ring. */
1508 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1509 struct rx_ring *rx_ring,
1510 struct ib_mac_iocb_rsp *ib_mac_rsp,
1514 struct net_device *ndev = qdev->ndev;
1515 struct sk_buff *skb = NULL;
1517 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1518 struct napi_struct *napi = &rx_ring->napi;
1520 skb = netdev_alloc_skb(ndev, length);
1522 netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
1524 rx_ring->rx_dropped++;
1525 put_page(lbq_desc->p.pg_chunk.page);
1529 addr = lbq_desc->p.pg_chunk.va;
1532 /* The max framesize filter on this chip is set higher than
1533 * MTU since FCoE uses 2k frames.
1535 if (skb->len > ndev->mtu + ETH_HLEN) {
1536 netif_err(qdev, drv, qdev->ndev,
1537 "Segment too small, dropping.\n");
1538 rx_ring->rx_dropped++;
1541 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1542 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1543 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1545 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1546 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1548 skb->len += length-ETH_HLEN;
1549 skb->data_len += length-ETH_HLEN;
1550 skb->truesize += length-ETH_HLEN;
1552 rx_ring->rx_packets++;
1553 rx_ring->rx_bytes += skb->len;
1554 skb->protocol = eth_type_trans(skb, ndev);
1555 skb_checksum_none_assert(skb);
1557 if ((ndev->features & NETIF_F_RXCSUM) &&
1558 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1560 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1561 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562 "TCP checksum done!\n");
1563 skb->ip_summed = CHECKSUM_UNNECESSARY;
1564 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1565 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1566 /* Unfragmented ipv4 UDP frame. */
1568 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1569 if (!(iph->frag_off &
1570 htons(IP_MF|IP_OFFSET))) {
1571 skb->ip_summed = CHECKSUM_UNNECESSARY;
1572 netif_printk(qdev, rx_status, KERN_DEBUG,
1574 "UDP checksum done!\n");
1579 skb_record_rx_queue(skb, rx_ring->cq_id);
1580 if (vlan_id != 0xffff)
1581 __vlan_hwaccel_put_tag(skb, vlan_id);
1582 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1583 napi_gro_receive(napi, skb);
1585 netif_receive_skb(skb);
1588 dev_kfree_skb_any(skb);
1589 put_page(lbq_desc->p.pg_chunk.page);
1592 /* Process an inbound completion from an rx ring. */
1593 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1594 struct rx_ring *rx_ring,
1595 struct ib_mac_iocb_rsp *ib_mac_rsp,
1599 struct net_device *ndev = qdev->ndev;
1600 struct sk_buff *skb = NULL;
1601 struct sk_buff *new_skb = NULL;
1602 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1604 skb = sbq_desc->p.skb;
1605 /* Allocate new_skb and copy */
1606 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1607 if (new_skb == NULL) {
1608 netif_err(qdev, probe, qdev->ndev,
1609 "No skb available, drop the packet.\n");
1610 rx_ring->rx_dropped++;
1613 skb_reserve(new_skb, NET_IP_ALIGN);
1614 memcpy(skb_put(new_skb, length), skb->data, length);
1617 /* loopback self test for ethtool */
1618 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1619 ql_check_lb_frame(qdev, skb);
1620 dev_kfree_skb_any(skb);
1624 /* The max framesize filter on this chip is set higher than
1625 * MTU since FCoE uses 2k frames.
1627 if (skb->len > ndev->mtu + ETH_HLEN) {
1628 dev_kfree_skb_any(skb);
1629 rx_ring->rx_dropped++;
1633 prefetch(skb->data);
1634 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1635 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1637 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1638 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1639 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1644 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1645 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646 "Promiscuous Packet.\n");
1648 rx_ring->rx_packets++;
1649 rx_ring->rx_bytes += skb->len;
1650 skb->protocol = eth_type_trans(skb, ndev);
1651 skb_checksum_none_assert(skb);
1653 /* If rx checksum is on, and there are no
1654 * csum or frame errors.
1656 if ((ndev->features & NETIF_F_RXCSUM) &&
1657 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1659 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1661 "TCP checksum done!\n");
1662 skb->ip_summed = CHECKSUM_UNNECESSARY;
1663 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665 /* Unfragmented ipv4 UDP frame. */
1666 struct iphdr *iph = (struct iphdr *) skb->data;
1667 if (!(iph->frag_off &
1668 htons(IP_MF|IP_OFFSET))) {
1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
1670 netif_printk(qdev, rx_status, KERN_DEBUG,
1672 "UDP checksum done!\n");
1677 skb_record_rx_queue(skb, rx_ring->cq_id);
1678 if (vlan_id != 0xffff)
1679 __vlan_hwaccel_put_tag(skb, vlan_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1681 napi_gro_receive(&rx_ring->napi, skb);
1683 netif_receive_skb(skb);
1686 static void ql_realign_skb(struct sk_buff *skb, int len)
1688 void *temp_addr = skb->data;
1690 /* Undo the skb_reserve(skb,32) we did before
1691 * giving to hardware, and realign data on
1692 * a 2-byte boundary.
1694 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1695 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1696 skb_copy_to_linear_data(skb, temp_addr,
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the
 * near future, but for now it works well.
 */
1705 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1706 struct rx_ring *rx_ring,
1707 struct ib_mac_iocb_rsp *ib_mac_rsp)
1709 struct bq_desc *lbq_desc;
1710 struct bq_desc *sbq_desc;
1711 struct sk_buff *skb = NULL;
1712 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1713 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1716 * Handle the header buffer if present.
1718 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1719 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1720 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1721 "Header of %d bytes in small buffer.\n", hdr_len);
1723 * Headers fit nicely into a small buffer.
1725 sbq_desc = ql_get_curr_sbuf(rx_ring);
1726 pci_unmap_single(qdev->pdev,
1727 dma_unmap_addr(sbq_desc, mapaddr),
1728 dma_unmap_len(sbq_desc, maplen),
1729 PCI_DMA_FROMDEVICE);
1730 skb = sbq_desc->p.skb;
1731 ql_realign_skb(skb, hdr_len);
1732 skb_put(skb, hdr_len);
1733 sbq_desc->p.skb = NULL;
1737 * Handle the data buffer(s).
1739 if (unlikely(!length)) { /* Is there data too? */
1740 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741 "No Data buffer in this packet.\n");
1745 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1746 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748 "Headers in small, data of %d bytes in small, combine them.\n",
1751 * Data is less than small buffer size so it's
1752 * stuffed in a small buffer.
1753 * For this case we append the data
1754 * from the "data" small buffer to the "header" small
1757 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758 pci_dma_sync_single_for_cpu(qdev->pdev,
1760 (sbq_desc, mapaddr),
1763 PCI_DMA_FROMDEVICE);
1764 memcpy(skb_put(skb, length),
1765 sbq_desc->p.skb->data, length);
1766 pci_dma_sync_single_for_device(qdev->pdev,
1773 PCI_DMA_FROMDEVICE);
1775 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776 "%d bytes in a single small buffer.\n",
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 skb = sbq_desc->p.skb;
1780 ql_realign_skb(skb, length);
1781 skb_put(skb, length);
1782 pci_unmap_single(qdev->pdev,
1783 dma_unmap_addr(sbq_desc,
1785 dma_unmap_len(sbq_desc,
1787 PCI_DMA_FROMDEVICE);
1788 sbq_desc->p.skb = NULL;
1790 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 "Header in small, %d bytes in large. Chain large to small!\n",
1796 * The data is in a single large buffer. We
1797 * chain it to the header buffer's skb and let
1800 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1801 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802 "Chaining page at offset = %d, for %d bytes to skb.\n",
1803 lbq_desc->p.pg_chunk.offset, length);
1804 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1805 lbq_desc->p.pg_chunk.offset,
1808 skb->data_len += length;
1809 skb->truesize += length;
1812 * The headers and data are in a single large buffer. We
1813 * copy it to a new skb and let it go. This can happen with
1814 * jumbo mtu on a non-TCP/UDP frame.
1816 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817 skb = netdev_alloc_skb(qdev->ndev, length);
1819 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1820 "No skb available, drop the packet.\n");
1823 pci_unmap_page(qdev->pdev,
1824 dma_unmap_addr(lbq_desc,
1826 dma_unmap_len(lbq_desc, maplen),
1827 PCI_DMA_FROMDEVICE);
1828 skb_reserve(skb, NET_IP_ALIGN);
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1832 skb_fill_page_desc(skb, 0,
1833 lbq_desc->p.pg_chunk.page,
1834 lbq_desc->p.pg_chunk.offset,
1837 skb->data_len += length;
1838 skb->truesize += length;
1840 __pskb_pull_tail(skb,
1841 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1842 VLAN_ETH_HLEN : ETH_HLEN);
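			/* The Ethernet (or VLAN) header was pulled into the
			 * linear area above so that eth_type_trans() can
			 * parse it without touching the page frag.
			 */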
			/*
			 * The data is in a chain of large buffers
			 * pointed to by a small buffer.  We loop
			 * through and chain them to our small header
			 * buffer's skb.
			 * frags:  There are 18 max frags and our small
			 *         buffer will hold 32 of them. The thing is,
			 *         we'll use 3 max for our 9000 byte jumbo
			 *         frames.  If the MTU goes up we could
			 *         eventually be in trouble.
			 */
1857 sbq_desc = ql_get_curr_sbuf(rx_ring);
1858 pci_unmap_single(qdev->pdev,
1859 dma_unmap_addr(sbq_desc, mapaddr),
1860 dma_unmap_len(sbq_desc, maplen),
1861 PCI_DMA_FROMDEVICE);
1862 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
1872 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1873 "%d bytes of headers & data in chain of large.\n",
1875 skb = sbq_desc->p.skb;
1876 sbq_desc->p.skb = NULL;
1877 skb_reserve(skb, NET_IP_ALIGN);
1879 while (length > 0) {
1880 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1881 size = (length < rx_ring->lbq_buf_size) ? length :
1882 rx_ring->lbq_buf_size;
1884 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885 "Adding page %d to skb for %d bytes.\n",
1887 skb_fill_page_desc(skb, i,
1888 lbq_desc->p.pg_chunk.page,
1889 lbq_desc->p.pg_chunk.offset,
1892 skb->data_len += size;
1893 skb->truesize += size;
1897 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1898 VLAN_ETH_HLEN : ETH_HLEN);
1903 /* Process an inbound completion from an rx ring. */
1904 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1905 struct rx_ring *rx_ring,
1906 struct ib_mac_iocb_rsp *ib_mac_rsp,
1909 struct net_device *ndev = qdev->ndev;
1910 struct sk_buff *skb = NULL;
1912 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1914 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1915 if (unlikely(!skb)) {
1916 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917 "No skb available, drop packet.\n");
1918 rx_ring->rx_dropped++;
1922 /* The max framesize filter on this chip is set higher than
1923 * MTU since FCoE uses 2k frames.
1925 if (skb->len > ndev->mtu + ETH_HLEN) {
1926 dev_kfree_skb_any(skb);
1927 rx_ring->rx_dropped++;
1931 /* loopback self test for ethtool */
1932 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1933 ql_check_lb_frame(qdev, skb);
1934 dev_kfree_skb_any(skb);
1938 prefetch(skb->data);
1939 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1940 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1941 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1942 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1943 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1944 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1945 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1946 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1947 rx_ring->rx_multicast++;
1949 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1950 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951 "Promiscuous Packet.\n");
1954 skb->protocol = eth_type_trans(skb, ndev);
1955 skb_checksum_none_assert(skb);
1957 /* If rx checksum is on, and there are no
1958 * csum or frame errors.
1960 if ((ndev->features & NETIF_F_RXCSUM) &&
1961 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1963 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1964 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1965 "TCP checksum done!\n");
1966 skb->ip_summed = CHECKSUM_UNNECESSARY;
1967 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1968 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1969 /* Unfragmented ipv4 UDP frame. */
1970 struct iphdr *iph = (struct iphdr *) skb->data;
1971 if (!(iph->frag_off &
1972 htons(IP_MF|IP_OFFSET))) {
1973 skb->ip_summed = CHECKSUM_UNNECESSARY;
1974 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
1980 rx_ring->rx_packets++;
1981 rx_ring->rx_bytes += skb->len;
1982 skb_record_rx_queue(skb, rx_ring->cq_id);
1983 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1984 __vlan_hwaccel_put_tag(skb, vlan_id);
1985 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1986 napi_gro_receive(&rx_ring->napi, skb);
1988 netif_receive_skb(skb);
1991 /* Process an inbound completion from an rx ring. */
1992 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1993 struct rx_ring *rx_ring,
1994 struct ib_mac_iocb_rsp *ib_mac_rsp)
1996 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1997 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1998 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1999 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
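	/* 0xffff is used as the "no VLAN tag present" sentinel and is
	 * checked before the tag is handed to the stack.
	 */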
2001 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2003 /* Frame error, so drop the packet. */
2004 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006 return (unsigned long)length;
2009 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2010 /* The data and headers are split into
2013 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2015 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2016 /* The data fit in a single small buffer.
2017 * Allocate a new skb, copy the data and
2018 * return the buffer to the free pool.
2020 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2022 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2023 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2024 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2025 /* TCP packet in a page chunk that's been checksummed.
2026 * Tack it on to our GRO skb and let it go.
2028 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2030 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2031 /* Non-TCP packet in a page chunk. Allocate an
2032 * skb, tack it on frags, and send it up.
2034 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
2040 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2044 return (unsigned long)length;
2047 /* Process an outbound completion from an rx ring. */
2048 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2049 struct ob_mac_iocb_rsp *mac_rsp)
2051 struct tx_ring *tx_ring;
2052 struct tx_ring_desc *tx_ring_desc;
2054 QL_DUMP_OB_MAC_RSP(mac_rsp);
2055 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2056 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2057 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2058 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2059 tx_ring->tx_packets++;
2060 dev_kfree_skb(tx_ring_desc->skb);
2061 tx_ring_desc->skb = NULL;
2063 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2066 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2067 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2068 netif_warn(qdev, tx_done, qdev->ndev,
2069 "Total descriptor length did not match transfer length.\n");
2071 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2072 netif_warn(qdev, tx_done, qdev->ndev,
2073 "Frame too short to be valid, not sent.\n");
2075 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2076 netif_warn(qdev, tx_done, qdev->ndev,
2077 "Frame too long, but sent anyway.\n");
2079 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2080 netif_warn(qdev, tx_done, qdev->ndev,
2081 "PCI backplane error. Frame not sent.\n");
2084 atomic_inc(&tx_ring->tx_count);
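	/* The atomic_inc above returns one descriptor slot to the TX
	 * ring; the queue is re-woken from ql_clean_outbound_rx_ring()
	 * once enough slots are free.
	 */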
2087 /* Fire up a handler to reset the MPI processor. */
2088 void ql_queue_fw_error(struct ql_adapter *qdev)
2091 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2094 void ql_queue_asic_error(struct ql_adapter *qdev)
2097 ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
2102 clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to tell the reset process that we
	 * are in fatal-error recovery rather than a normal close.
	 */
2106 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2107 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2110 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2111 struct ib_ae_iocb_rsp *ib_ae_rsp)
2113 switch (ib_ae_rsp->event) {
2114 case MGMT_ERR_EVENT:
2115 netif_err(qdev, rx_err, qdev->ndev,
2116 "Management Processor Fatal Error.\n");
2117 ql_queue_fw_error(qdev);
2120 case CAM_LOOKUP_ERR_EVENT:
2121 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2122 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2123 ql_queue_asic_error(qdev);
2126 case SOFT_ECC_ERROR_EVENT:
2127 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2128 ql_queue_asic_error(qdev);
2131 case PCI_ERR_ANON_BUF_RD:
2132 netdev_err(qdev->ndev, "PCI error occurred when reading "
2133 "anonymous buffers from rx_ring %d.\n",
2135 ql_queue_asic_error(qdev);
2139 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2141 ql_queue_asic_error(qdev);
2146 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2148 struct ql_adapter *qdev = rx_ring->qdev;
2149 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2150 struct ob_mac_iocb_rsp *net_rsp = NULL;
2153 struct tx_ring *tx_ring;
2154 /* While there are entries in the completion queue. */
2155 while (prod != rx_ring->cnsmr_idx) {
2157 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2159 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2161 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2163 switch (net_rsp->opcode) {
2165 case OPCODE_OB_MAC_TSO_IOCB:
2166 case OPCODE_OB_MAC_IOCB:
2167 ql_process_mac_tx_intr(qdev, net_rsp);
2170 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2171 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2175 ql_update_cq(rx_ring);
2176 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2180 ql_write_cq_idx(rx_ring);
2181 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2182 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2183 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2185 * The queue got stopped because the tx_ring was full.
2186 * Wake it up, because it's now at least 25% empty.
2188 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2194 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2196 struct ql_adapter *qdev = rx_ring->qdev;
2197 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2198 struct ql_net_rsp_iocb *net_rsp;
2201 /* While there are entries in the completion queue. */
2202 while (prod != rx_ring->cnsmr_idx) {
2204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2206 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2208 net_rsp = rx_ring->curr_entry;
2210 switch (net_rsp->opcode) {
2211 case OPCODE_IB_MAC_IOCB:
2212 ql_process_mac_rx_intr(qdev, rx_ring,
2213 (struct ib_mac_iocb_rsp *)
2217 case OPCODE_IB_AE_IOCB:
2218 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2222 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2223 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2228 ql_update_cq(rx_ring);
2229 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230 if (count == budget)
2233 ql_update_buffer_queues(qdev, rx_ring);
2234 ql_write_cq_idx(rx_ring);
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

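/* Each MSI-X vector owns one RSS (inbound) ring plus any TX completion
 * rings whose cq_id bit is set in the vector's irq_mask, so a single poll
 * call services the TX completions first and then spends the NAPI budget
 * on the RSS ring.  Only the RSS work counts against the budget; the TX
 * completion cleanup is not budget-limited.
 */
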
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure the tx flag is always in the same
	 * state as the rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

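/* These hooks back the ethtool feature toggles; e.g. running
 * "ethtool -K ethX rxvlan off" lands here: qlge_fix_features() forces the
 * TX VLAN flag to follow the RX flag, and qlge_set_features() reprograms
 * NIC_RCV_CFG only when the RX VLAN bit actually changed.
 */
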
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

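/* Called on paths that re-initialize the adapter: rather than trusting
 * whatever VLAN filter state is left in the chip, every VID recorded in
 * active_vlans is re-programmed into the MAC address CAM under the
 * MAC_ADDR hardware semaphore.
 */
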
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;

	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

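/* Interrupt dispatch summary for the handler above: a non-zero irq_cnt on
 * intr_context[0] means the completion interrupt is already masked and
 * the line is not ours, so IRQ_NONE is returned; STS_FE kicks off the
 * ASIC error recovery path; STS_PI defers MPI mailbox/async-event
 * handling to the mpi_work workqueue; anything left in ISR1 that matches
 * our irq_mask is handed to NAPI.
 */
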
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

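/* Note on the checksum seeding above: for TSO the TCP checksum field is
 * pre-loaded with the ones-complement pseudo-header sum computed with a
 * length of 0, because the hardware segments the frame itself and must
 * fold each segment's own payload length into the checksum it emits.
 */
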
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

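/* For non-TSO CHECKSUM_PARTIAL frames the driver still uses the TSO IOCB
 * format: it points the hardware at the transport header, flags TCP vs.
 * UDP, and seeds *check with the pseudo-header sum over the L4 length
 * (the IP total length minus the IP header, i.e. ihl * 4 bytes).
 */
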
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

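/* Transmit fast path above, in short: reserve a descriptor, build the MAC
 * IOCB (VLAN tag, TSO or checksum offload as needed), DMA-map the
 * fragments, then advance prod_idx with wrap-around and write it to the
 * doorbell register so the chip starts fetching.  The wmb() makes sure
 * the IOCB contents are visible in memory before the doorbell write.
 */
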
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);

	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

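/* The two shadow pages allocated above are plain coherent DMA buffers
 * that the chip updates on the driver's behalf (one page for the RX-side
 * indexes, one for the TX-side indexes), which is why the completion
 * cleaners poll ql_read_sh_reg(prod_idx_sh_reg) in host memory instead of
 * reading a device register for every completion.
 */
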
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}

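/* Note on the alignment test above: the work queue base handed to the
 * chip must not have any of the WQ_ADDR_ALIGN bits set in its DMA
 * address; an address that fails this check is treated the same way as a
 * failed allocation and the setup is aborted through the error labels.
 */
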
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

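/* Each rx_ring owns up to three coherent DMA areas -- the completion
 * queue itself plus the optional large- and small-buffer queues -- along
 * with the kmalloc'ed lbq/sbq control-block arrays.  The free routine
 * above and the allocator below must stay symmetric so teardown releases
 * exactly what setup obtained.
 */
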
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);