drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2012 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
        [board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
                 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
                 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
            !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
                schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_clear_bit();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
        u32 ofs;
        char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

        /* General Registers */
        {IXGBE_CTRL, "CTRL"},
        {IXGBE_STATUS, "STATUS"},
        {IXGBE_CTRL_EXT, "CTRL_EXT"},

        /* Interrupt Registers */
        {IXGBE_EICR, "EICR"},

        /* RX Registers */
        {IXGBE_SRRCTL(0), "SRRCTL"},
        {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
        {IXGBE_RDLEN(0), "RDLEN"},
        {IXGBE_RDH(0), "RDH"},
        {IXGBE_RDT(0), "RDT"},
        {IXGBE_RXDCTL(0), "RXDCTL"},
        {IXGBE_RDBAL(0), "RDBAL"},
        {IXGBE_RDBAH(0), "RDBAH"},

        /* TX Registers */
        {IXGBE_TDBAL(0), "TDBAL"},
        {IXGBE_TDBAH(0), "TDBAH"},
        {IXGBE_TDLEN(0), "TDLEN"},
        {IXGBE_TDH(0), "TDH"},
        {IXGBE_TDT(0), "TDT"},
        {IXGBE_TXDCTL(0), "TXDCTL"},

        /* List Terminator */
        {}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
        int i = 0, j = 0;
        char rname[16];
        u32 regs[64];

        switch (reginfo->ofs) {
        case IXGBE_SRRCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
                break;
        case IXGBE_DCA_RXCTRL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                break;
        case IXGBE_RDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
                break;
        case IXGBE_RDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
                break;
        case IXGBE_RDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
                break;
        case IXGBE_RXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                break;
        case IXGBE_RDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
                break;
        case IXGBE_RDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
                break;
        case IXGBE_TDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
                break;
        case IXGBE_TDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
                break;
        case IXGBE_TDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
                break;
        case IXGBE_TDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
                break;
        case IXGBE_TDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
                break;
        case IXGBE_TXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
                break;
        default:
                pr_info("%-15s %08x\n", reginfo->name,
                        IXGBE_READ_REG(hw, reginfo->ofs));
                return;
        }

        for (i = 0; i < 8; i++) {
                snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
                pr_err("%-15s", rname);
                for (j = 0; j < 8; j++)
                        pr_cont(" %08x", regs[i*8+j]);
                pr_cont("\n");
        }

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_reg_info *reginfo;
        int n = 0;
        struct ixgbe_ring *tx_ring;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        struct my_u0 { u64 a; u64 b; } *u0;
        struct ixgbe_ring *rx_ring;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        u32 staterr;
        int i = 0;

        if (!netif_msg_hw(adapter))
                return;

        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            "
                        "trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
                        netdev->trans_start,
                        netdev->last_rx);
        }

        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
        pr_info(" Register Name   Value\n");
        for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
             reginfo->name; reginfo++) {
                ixgbe_regdump(hw, reginfo);
        }

        /* Print TX Ring Summary */
        if (!netdev || !netif_running(netdev))
                goto exit;

        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
        pr_info(" %s     %s              %s        %s\n",
                "Queue [NTU] [NTC] [bi(ntc)->dma  ]",
                "leng", "ntw", "timestamp");
        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
                           n, tx_ring->next_to_use, tx_ring->next_to_clean,
                           (u64)dma_unmap_addr(tx_buffer, dma),
                           dma_unmap_len(tx_buffer, len),
                           tx_buffer->next_to_watch,
                           (u64)tx_buffer->time_stamp);
        }

        /* Print TX Rings */
        if (!netif_msg_tx_done(adapter))
                goto rx_ring_summary;

        dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

        /* Transmit Descriptor Formats
         *
         * 82598 Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
         *   +--------------------------------------------------------------+
         *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
         *
         * 82598 Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |          NXTSEQ           |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         *
         * 82599+ Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
         *   +--------------------------------------------------------------+
         *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
         *
         * 82599+ Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |           RSV             |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         */

        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("%s%s    %s              %s        %s          %s\n",
                        "T [desc]     [address 63:0  ] ",
                        "[PlPOIdStDDt Ln] [bi->dma       ] ",
                        "leng", "ntw", "timestamp", "bi->skb");

                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                        tx_desc = IXGBE_TX_DESC(tx_ring, i);
                        tx_buffer = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
                        if (dma_unmap_len(tx_buffer, len) > 0) {
                                pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
                                        i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)dma_unmap_addr(tx_buffer, dma),
                                        dma_unmap_len(tx_buffer, len),
                                        tx_buffer->next_to_watch,
                                        (u64)tx_buffer->time_stamp,
                                        tx_buffer->skb);
                                if (i == tx_ring->next_to_use &&
                                        i == tx_ring->next_to_clean)
                                        pr_cont(" NTC/U\n");
                                else if (i == tx_ring->next_to_use)
                                        pr_cont(" NTU\n");
                                else if (i == tx_ring->next_to_clean)
                                        pr_cont(" NTC\n");
                                else
                                        pr_cont("\n");

                                if (netif_msg_pktdata(adapter) &&
                                    tx_buffer->skb)
                                        print_hex_dump(KERN_INFO, "",
                                                DUMP_PREFIX_ADDRESS, 16, 1,
                                                tx_buffer->skb->data,
                                                dma_unmap_len(tx_buffer, len),
                                                true);
                        }
                }
        }

        /* Print RX Rings Summary */
rx_ring_summary:
        dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
        pr_info("Queue [NTU] [NTC]\n");
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("%5d %5X %5X\n",
                        n, rx_ring->next_to_use, rx_ring->next_to_clean);
        }

        /* Print RX Rings */
        if (!netif_msg_rx_status(adapter))
                goto exit;

        dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

        /* Receive Descriptor Formats
         *
         * 82598 Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82598 Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 16 15   4 3     0
         *   +------------------------------------------------------+
         * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
         *   | Packet   | IP     |   |          |     | Type | Type |
         *   | Checksum | Ident  |   |          |     |      |      |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         *
         * 82599+ Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82599+ Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 17 16   4 3     0
         *   +------------------------------------------------------+
         * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
         *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
         *   |/ Flow Dir Flt ID  |   |          |     |      |      |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
         *   +------------------------------------------------------+
         *   63       48 47    32 31          20 19                 0
         */

        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("%s%s%s",
                        "R  [desc]      [ PktBuf     A0] ",
                        "[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
                        "<-- Adv Rx Read format\n");
                pr_info("%s%s%s",
                        "RWB[desc]      [PcsmIpSHl PtRs] ",
                        "[vl er S cks ln] ---------------- [bi->skb       ] ",
                        "<-- Adv Rx Write-Back format\n");

                for (i = 0; i < rx_ring->count; i++) {
                        rx_buffer_info = &rx_ring->rx_buffer_info[i];
                        rx_desc = IXGBE_RX_DESC(rx_ring, i);
                        u0 = (struct my_u0 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
                        if (staterr & IXGBE_RXD_STAT_DD) {
                                /* Descriptor Done */
                                pr_info("RWB[0x%03X]     %016llX "
                                        "%016llX ---------------- %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        rx_buffer_info->skb);
                        } else {
                                pr_info("R  [0x%03X]     %016llX "
                                        "%016llX %016llX %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)rx_buffer_info->dma,
                                        rx_buffer_info->skb);

                                if (netif_msg_pktdata(adapter) &&
                                    rx_buffer_info->dma) {
                                        print_hex_dump(KERN_INFO, "",
                                           DUMP_PREFIX_ADDRESS, 16, 1,
                                           page_address(rx_buffer_info->page) +
                                                    rx_buffer_info->page_offset,
                                           ixgbe_rx_bufsz(rx_ring), true);
                                }
                        }

                        if (i == rx_ring->next_to_use)
                                pr_cont(" NTU\n");
                        else if (i == rx_ring->next_to_clean)
                                pr_cont(" NTC\n");
                        else
                                pr_cont("\n");

                }
        }

exit:
        return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}
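
/* Editorial worked example (not in the original source), following the
 * 82599/X540 arithmetic above: each IVAR register holds four 8-bit vector
 * entries covering two queues, with Rx causes in bytes 0/2 and Tx causes
 * in bytes 1/3.  For Rx queue 5 (direction 0):
 * index = 16 * (5 & 1) + 8 * 0 = 16, so the vector lands in bits 23:16 of
 * IVAR(5 >> 1) = IVAR(2).  For Tx queue 5 (direction 1): index = 16 + 8 =
 * 24, i.e. bits 31:24 of the same register.
 */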

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
                break;
        default:
                break;
        }
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
                                      struct ixgbe_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}
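
/* Editorial note (an inference about the transmit path, which is outside
 * this excerpt): the split above between dma_unmap_single() for the
 * skb-owning buffer and dma_unmap_page() for the rest mirrors how the Tx
 * map path presumably maps the linear skb head with dma_map_single() and
 * the paged fragments with a dma_map_page()-style helper; the unmap call
 * must always match the map call for the same buffer.
 */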

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        int i;
        u32 data;

        if ((hw->fc.current_mode != ixgbe_fc_full) &&
            (hw->fc.current_mode != ixgbe_fc_rx_pause))
                return;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                break;
        default:
                data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        }
        hwstats->lxoffrxc += data;

        /* refill credits (no tx hang) if we received xoff */
        if (!data)
                return;

        for (i = 0; i < adapter->num_tx_queues; i++)
                clear_bit(__IXGBE_HANG_CHECK_ARMED,
                          &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u32 xoff[8] = {0};
        u8 tc;
        int i;
        bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

        if (adapter->ixgbe_ieee_pfc)
                pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
                ixgbe_update_xoff_rx_lfc(adapter);
                return;
        }

        /* update stats for each tc, only valid with PFC enabled */
        for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
                u32 pxoffrxc;

                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
                        pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
                hwstats->pxoffrxc[i] += pxoffrxc;
                /* Get the TC for given UP */
                tc = netdev_get_prio_tc_map(adapter->netdev, i);
                xoff[tc] += pxoffrxc;
        }

        /* disarm tx queues that have received xoff frames */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

                tc = tx_ring->dcb_tc;
                if (xoff[tc])
                        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
        return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
        struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
        u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);

        return 0;
}
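
/* Editorial worked example of the wraparound arithmetic above: with
 * ring->count = 512, head = 500 and tail = 10, head != tail and
 * head > tail, so the pending count is tail + count - head =
 * 10 + 512 - 500 = 22 descriptors the hardware has not yet consumed.
 */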

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
        u32 tx_done = ixgbe_get_tx_completed(tx_ring);
        u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
        bool ret = false;

        clear_check_for_tx_hang(tx_ring);

        /*
         * Check for a hung queue, but be thorough. This verifies
         * that a transmit has been completed since the previous
         * check AND there is at least one packet pending. The
         * ARMED bit is set to indicate a potential hang. The
         * bit is cleared if a pause frame is received to remove
         * false hang detection due to PFC or 802.3x frames. By
         * requiring this to fail twice we avoid races with
         * pfc clearing the ARMED bit and conditions where we
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
        if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
                                       &tx_ring->state);
        } else {
                /* update completed stats and continue */
                tx_ring->tx_stats.tx_done_old = tx_done;
                /* reset the countdown */
                clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }

        return ret;
}
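
/* Editorial summary of the two-strike scheme above: the first stalled
 * check only arms __IXGBE_HANG_CHECK_ARMED (test_and_set_bit returns the
 * old value, so this returns false); a hang is reported only when a
 * second consecutive check still sees no progress while the bit is
 * already set.  Any completion, or a received pause frame, disarms the
 * bit and restarts the countdown.
 */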

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

        /* Do the reset outside of interrupt context */
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
                ixgbe_service_event_schedule(adapter);
        }
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = q_vector->tx.work_limit;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBE_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                rmb();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
                        ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                struct ixgbe_hw *hw = &adapter->hw;
                e_err(drv, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
                        IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
                        tx_ring->next_to_use, i,
                        tx_ring->tx_buffer_info[i].time_stamp, jiffies);

                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

                e_info(probe,
                       "tx hang %d detected on queue %d, resetting adapter\n",
                        adapter->tx_timeout_count + 1, tx_ring->queue_index);

                /* schedule immediate reset if we believe we hung */
                ixgbe_tx_timeout_reset(adapter);

                /* the adapter is about to reset, no point in enabling stuff */
                return true;
        }

        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index)
                    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}
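
/* Editorial note on the loop above: next_to_clean is biased by
 * -tx_ring->count before the loop, so the running index i stays negative
 * while walking the ring and the cheap "unlikely(!i)" test detects a
 * wrap, at which point i is rewound by the ring size.  Adding
 * tx_ring->count back after the loop restores a normal 0-based index
 * before it is stored to next_to_clean.
 */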

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring,
                                int cpu)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
        u16 reg_offset;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
                txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
                break;
        default:
                /* for unknown hardware do not write register */
                return;
        }

        /*
         * We can enable relaxed ordering for reads, but not writes when
         * DCA is enabled.  This is due to a known issue in some chipsets
         * which will cause the DCA tag to be cleared.
         */
        txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
                  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
                  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

        IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring,
                                int cpu)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
        u8 reg_idx = rx_ring->reg_idx;


        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
                break;
        default:
                break;
        }

        /*
         * We can enable relaxed ordering for reads, but not writes when
         * DCA is enabled.  This is due to a known issue in some chipsets
         * which will cause the DCA tag to be cleared.
         */
        rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
                  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

        IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *ring;
        int cpu = get_cpu();

        if (q_vector->cpu == cpu)
                goto out_no_update;

        ixgbe_for_each_ring(ring, q_vector->tx)
                ixgbe_update_tx_dca(adapter, ring, cpu);

        ixgbe_for_each_ring(ring, q_vector->rx)
                ixgbe_update_rx_dca(adapter, ring, cpu);

        q_vector->cpu = cpu;
out_no_update:
        put_cpu();
}
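
/* Editorial note: get_cpu() above disables preemption, so the CPU number
 * stays valid while the Tx/Rx DCA tags are reprogrammed; put_cpu()
 * re-enables preemption once q_vector->cpu has been updated.
 */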
1051
1052 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1053 {
1054         int i;
1055
1056         if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1057                 return;
1058
1059         /* always use CB2 mode, difference is masked in the CB driver */
1060         IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
1061
1062         for (i = 0; i < adapter->num_q_vectors; i++) {
1063                 adapter->q_vector[i]->cpu = -1;
1064                 ixgbe_update_dca(adapter->q_vector[i]);
1065         }
1066 }
1067
1068 static int __ixgbe_notify_dca(struct device *dev, void *data)
1069 {
1070         struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1071         unsigned long event = *(unsigned long *)data;
1072
1073         if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1074                 return 0;
1075
1076         switch (event) {
1077         case DCA_PROVIDER_ADD:
1078                 /* if we're already enabled, don't do it again */
1079                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1080                         break;
1081                 if (dca_add_requester(dev) == 0) {
1082                         adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1083                         ixgbe_setup_dca(adapter);
1084                         break;
1085                 }
1086                 /* Fall Through since DCA is disabled. */
1087         case DCA_PROVIDER_REMOVE:
1088                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1089                         dca_remove_requester(dev);
1090                         adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1091                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
1092                 }
1093                 break;
1094         }
1095
1096         return 0;
1097 }
1098
1099 #endif /* CONFIG_IXGBE_DCA */
1100 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1101                                  union ixgbe_adv_rx_desc *rx_desc,
1102                                  struct sk_buff *skb)
1103 {
1104         if (ring->netdev->features & NETIF_F_RXHASH)
1105                 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1106 }
1107
1108 #ifdef IXGBE_FCOE
1109 /**
1110  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1111  * @ring: structure containing ring specific data
1112  * @rx_desc: advanced rx descriptor
1113  *
1114  * Returns : true if it is FCoE pkt
1115  */
1116 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1117                                     union ixgbe_adv_rx_desc *rx_desc)
1118 {
1119         __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1120
1121         return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1122                ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1123                 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1124                              IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1125 }
1126
1127 #endif /* IXGBE_FCOE */
1128 /**
1129  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1130  * @ring: structure containing ring specific data
1131  * @rx_desc: current Rx descriptor being processed
1132  * @skb: skb currently being received and modified
1133  **/
1134 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1135                                      union ixgbe_adv_rx_desc *rx_desc,
1136                                      struct sk_buff *skb)
1137 {
1138         skb_checksum_none_assert(skb);
1139
1140         /* Rx csum disabled */
1141         if (!(ring->netdev->features & NETIF_F_RXCSUM))
1142                 return;
1143
1144         /* if IP and error */
1145         if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1146             ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1147                 ring->rx_stats.csum_err++;
1148                 return;
1149         }
1150
1151         if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1152                 return;
1153
1154         if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1155                 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1156
1157                 /*
1158                  * 82599 errata, UDP frames with a 0 checksum can be marked as
1159                  * checksum errors.
1160                  */
1161                 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1162                     test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1163                         return;
1164
1165                 ring->rx_stats.csum_err++;
1166                 return;
1167         }
1168
1169         /* It must be a TCP or UDP packet with a valid checksum */
1170         skb->ip_summed = CHECKSUM_UNNECESSARY;
1171 }
1172
1173 static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1174 {
1175         rx_ring->next_to_use = val;
1176
1177         /* update next to alloc since we have filled the ring */
1178         rx_ring->next_to_alloc = val;
1179         /*
1180          * Force memory writes to complete before letting h/w
1181          * know there are new descriptors to fetch.  (Only
1182          * applicable for weak-ordered memory model archs,
1183          * such as IA-64).
1184          */
1185         wmb();
1186         writel(val, rx_ring->tail);
1187 }
1188
1189 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1190                                     struct ixgbe_rx_buffer *bi)
1191 {
1192         struct page *page = bi->page;
1193         dma_addr_t dma = bi->dma;
1194
1195         /* since we are recycling buffers we should seldom need to alloc */
1196         if (likely(dma))
1197                 return true;
1198
1199         /* alloc new page for storage */
1200         if (likely(!page)) {
1201                 page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
1202                                          bi->skb, ixgbe_rx_pg_order(rx_ring));
1203                 if (unlikely(!page)) {
1204                         rx_ring->rx_stats.alloc_rx_page_failed++;
1205                         return false;
1206                 }
1207                 bi->page = page;
1208         }
1209
1210         /* map page for use */
1211         dma = dma_map_page(rx_ring->dev, page, 0,
1212                            ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1213
1214         /*
1215          * if mapping failed free memory back to system since
1216          * there isn't much point in holding memory we can't use
1217          */
1218         if (dma_mapping_error(rx_ring->dev, dma)) {
1219                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1220                 bi->page = NULL;
1221
1222                 rx_ring->rx_stats.alloc_rx_page_failed++;
1223                 return false;
1224         }
1225
1226         bi->dma = dma;
1227         bi->page_offset = 0;
1228
1229         return true;
1230 }
1231
1232 /**
1233  * ixgbe_alloc_rx_buffers - Replace used receive buffers
1234  * @rx_ring: ring to place buffers on
1235  * @cleaned_count: number of buffers to replace
1236  **/
1237 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1238 {
1239         union ixgbe_adv_rx_desc *rx_desc;
1240         struct ixgbe_rx_buffer *bi;
1241         u16 i = rx_ring->next_to_use;
1242
1243         /* nothing to do */
1244         if (!cleaned_count)
1245                 return;
1246
1247         rx_desc = IXGBE_RX_DESC(rx_ring, i);
1248         bi = &rx_ring->rx_buffer_info[i];
1249         i -= rx_ring->count;
1250
1251         do {
1252                 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1253                         break;
1254
1255                 /*
1256                  * Refresh the desc even if buffer_addrs didn't change
1257                  * because each write-back erases this info.
1258                  */
1259                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1260
1261                 rx_desc++;
1262                 bi++;
1263                 i++;
1264                 if (unlikely(!i)) {
1265                         rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1266                         bi = rx_ring->rx_buffer_info;
1267                         i -= rx_ring->count;
1268                 }
1269
1270                 /* clear the hdr_addr for the next_to_use descriptor */
1271                 rx_desc->read.hdr_addr = 0;
1272
1273                 cleaned_count--;
1274         } while (cleaned_count);
1275
1276         i += rx_ring->count;
1277
1278         if (rx_ring->next_to_use != i)
1279                 ixgbe_release_rx_desc(rx_ring, i);
1280 }
1281
1282 /**
1283  * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
1284  * @data: pointer to the start of the headers
1285  * @max_len: total length of section to find headers in
1286  *
1287  * This function is meant to determine the length of headers that will
1288  * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
1289  * motivation of doing this is to only perform one pull for IPv4 TCP
1290  * packets so that we can do basic things like calculating the gso_size
1291  * based on the average data per packet.
1292  **/
1293 static unsigned int ixgbe_get_headlen(unsigned char *data,
1294                                       unsigned int max_len)
1295 {
1296         union {
1297                 unsigned char *network;
1298                 /* l2 headers */
1299                 struct ethhdr *eth;
1300                 struct vlan_hdr *vlan;
1301                 /* l3 headers */
1302                 struct iphdr *ipv4;
1303                 struct ipv6hdr *ipv6;
1304         } hdr;
1305         __be16 protocol;
1306         u8 nexthdr = 0; /* default to not TCP */
1307         u8 hlen;
1308
1309         /* this should never happen, but better safe than sorry */
1310         if (max_len < ETH_HLEN)
1311                 return max_len;
1312
1313         /* initialize network frame pointer */
1314         hdr.network = data;
1315
1316         /* set first protocol and move network header forward */
1317         protocol = hdr.eth->h_proto;
1318         hdr.network += ETH_HLEN;
1319
1320         /* handle any vlan tag if present */
1321         if (protocol == __constant_htons(ETH_P_8021Q)) {
1322                 if ((hdr.network - data) > (max_len - VLAN_HLEN))
1323                         return max_len;
1324
1325                 protocol = hdr.vlan->h_vlan_encapsulated_proto;
1326                 hdr.network += VLAN_HLEN;
1327         }
1328
1329         /* handle L3 protocols */
1330         if (protocol == __constant_htons(ETH_P_IP)) {
1331                 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
1332                         return max_len;
1333
1334                 /* access ihl as a u8 to avoid unaligned access on ia64 */
1335                 hlen = (hdr.network[0] & 0x0F) << 2;
1336
1337                 /* verify hlen meets minimum size requirements */
1338                 if (hlen < sizeof(struct iphdr))
1339                         return hdr.network - data;
1340
1341                 /* record next protocol if header is present */
1342                 if (!hdr.ipv4->frag_off)
1343                         nexthdr = hdr.ipv4->protocol;
1344         } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1345                 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
1346                         return max_len;
1347
1348                 /* record next protocol */
1349                 nexthdr = hdr.ipv6->nexthdr;
1350                 hlen = sizeof(struct ipv6hdr);
1351 #ifdef IXGBE_FCOE
1352         } else if (protocol == __constant_htons(ETH_P_FCOE)) {
1353                 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
1354                         return max_len;
1355                 hlen = FCOE_HEADER_LEN;
1356 #endif
1357         } else {
1358                 return hdr.network - data;
1359         }
1360
1361         /* relocate pointer to start of L4 header */
1362         hdr.network += hlen;
1363
1364         /* finally sort out TCP/UDP */
1365         if (nexthdr == IPPROTO_TCP) {
1366                 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
1367                         return max_len;
1368
1369                 /* access doff as a u8 to avoid unaligned access on ia64 */
1370                 hlen = (hdr.network[12] & 0xF0) >> 2;
1371
1372                 /* verify hlen meets minimum size requirements */
1373                 if (hlen < sizeof(struct tcphdr))
1374                         return hdr.network - data;
1375
1376                 hdr.network += hlen;
1377         } else if (nexthdr == IPPROTO_UDP) {
1378                 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
1379                         return max_len;
1380
1381                 hdr.network += sizeof(struct udphdr);
1382         }
1383
1384         /*
1385          * If everything has gone correctly hdr.network should be the
1386          * data section of the packet and will be the end of the header.
1387          * If not then it probably represents the end of the last recognized
1388          * header.
1389          */
1390         if ((hdr.network - data) < max_len)
1391                 return hdr.network - data;
1392         else
1393                 return max_len;
1394 }
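
/*
 * Worked example for the parser above (standard header sizes, not a
 * real capture): a VLAN-tagged IPv4 TCP frame with no IP or TCP
 * options yields
 *
 *      ETH_HLEN (14) + VLAN_HLEN (4) + IP ihl (20) + TCP doff (20) = 58
 *
 * so ixgbe_get_headlen() returns 58 and the caller pulls only those
 * header bytes into the linear area.
 */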
1395
1396 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1397                                    struct sk_buff *skb)
1398 {
1399         u16 hdr_len = skb_headlen(skb);
1400
1401         /* set gso_size to avoid messing up TCP MSS */
1402         skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1403                                                  IXGBE_CB(skb)->append_cnt);
1404         if (skb->protocol == __constant_htons(ETH_P_IPV6))
1405                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1406         else
1407                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1408 }
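
/*
 * Example numbers for the gso_size computation above (illustrative
 * only).  If RSC merged 16 segments (append_cnt == 16) into a 23266
 * byte skb with a 66 byte linear header, then
 *
 *      gso_size = DIV_ROUND_UP(23266 - 66, 16) = 1450
 *
 * which keeps the MSS seen by the stack consistent with the original
 * wire segments.
 */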
1409
1410 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1411                                    struct sk_buff *skb)
1412 {
1413         /* if append_cnt is 0 then frame is not RSC */
1414         if (!IXGBE_CB(skb)->append_cnt)
1415                 return;
1416
1417         rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1418         rx_ring->rx_stats.rsc_flush++;
1419
1420         ixgbe_set_rsc_gso_size(rx_ring, skb);
1421
1422         /* gso_size is computed using append_cnt so always clear it last */
1423         IXGBE_CB(skb)->append_cnt = 0;
1424 }
1425
1426 /**
1427  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1428  * @rx_ring: rx descriptor ring packet is being transacted on
1429  * @rx_desc: pointer to the EOP Rx descriptor
1430  * @skb: pointer to current skb being populated
1431  *
1432  * This function checks the ring, descriptor, and packet information in
1433  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1434  * other fields within the skb.
1435  **/
1436 static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1437                                      union ixgbe_adv_rx_desc *rx_desc,
1438                                      struct sk_buff *skb)
1439 {
1440         struct net_device *dev = rx_ring->netdev;
1441
1442         skb->protocol = eth_type_trans(skb, dev);
1443
1444         ixgbe_update_rsc_stats(rx_ring, skb);
1445
1446         ixgbe_rx_hash(rx_ring, rx_desc, skb);
1447
1448         ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1449
1450         ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
1451
1452         if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1453             ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1454                 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1455                 __vlan_hwaccel_put_tag(skb, vid);
1456         }
1457
1458         skb_record_rx_queue(skb, rx_ring->queue_index);
1459 }
1460
1461 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1462                          struct sk_buff *skb)
1463 {
1464         struct ixgbe_adapter *adapter = q_vector->adapter;
1465
1466         if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1467                 napi_gro_receive(&q_vector->napi, skb);
1468         else
1469                 netif_rx(skb);
1470 }
1471
1472 /**
1473  * ixgbe_is_non_eop - process handling of non-EOP buffers
1474  * @rx_ring: Rx ring being processed
1475  * @rx_desc: Rx descriptor for current buffer
1476  * @skb: Current socket buffer containing buffer in progress
1477  *
1478  * This function updates next to clean.  If the buffer is an EOP buffer
1479  * this function exits returning false, otherwise it will place the
1480  * sk_buff in the next buffer to be chained and return true indicating
1481  * that this is in fact a non-EOP buffer.
1482  **/
1483 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1484                              union ixgbe_adv_rx_desc *rx_desc,
1485                              struct sk_buff *skb)
1486 {
1487         u32 ntc = rx_ring->next_to_clean + 1;
1488
1489         /* fetch, update, and store next to clean */
1490         ntc = (ntc < rx_ring->count) ? ntc : 0;
1491         rx_ring->next_to_clean = ntc;
1492
1493         prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1494
1495         /* update RSC append count if present */
1496         if (ring_is_rsc_enabled(rx_ring)) {
1497                 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1498                                      cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1499
1500                 if (unlikely(rsc_enabled)) {
1501                         u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1502
1503                         rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1504                         IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1505
1506                         /* update ntc based on RSC value */
1507                         ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1508                         ntc &= IXGBE_RXDADV_NEXTP_MASK;
1509                         ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1510                 }
1511         }
1512
1513         /* if we are the last buffer then there is nothing else to do */
1514         if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1515                 return false;
1516
1517         /* place skb in next buffer to be received */
1518         rx_ring->rx_buffer_info[ntc].skb = skb;
1519         rx_ring->rx_stats.non_eop_descs++;
1520
1521         return true;
1522 }
1523
1524 /**
1525  * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1526  * @rx_ring: rx descriptor ring packet is being transacted on
1527  * @skb: pointer to current skb being adjusted
1528  *
1529  * This function is an ixgbe specific version of __pskb_pull_tail.  The
1530  * main difference between this version and the original function is that
1531  * this function can make several assumptions about the state of things
1532  * that allow for significant optimizations versus the standard function.
1533  * As a result we can do things like drop a frag and maintain an accurate
1534  * truesize for the skb.
1535  */
1536 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1537                             struct sk_buff *skb)
1538 {
1539         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1540         unsigned char *va;
1541         unsigned int pull_len;
1542
1543         /*
1544          * it is valid to use page_address instead of kmap since we are
1545          * working with pages allocated out of the lowmem pool per
1546          * alloc_page(GFP_ATOMIC)
1547          */
1548         va = skb_frag_address(frag);
1549
1550         /*
1551          * we need the header to contain at least ETH_HLEN bytes, or 60
1552          * bytes when skb->len is less than 60, so skb_pad() has room.
1553          */
1554         pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1555
1556         /* align pull length to size of long to optimize memcpy performance */
1557         skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1558
1559         /* update all of the pointers */
1560         skb_frag_size_sub(frag, pull_len);
1561         frag->page_offset += pull_len;
1562         skb->data_len -= pull_len;
1563         skb->tail += pull_len;
1564 }
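
/*
 * The ALIGN() above may copy a few bytes past pull_len so that memcpy
 * runs in whole machine words; the extra bytes still land inside the
 * skb head.  For example, on a 64-bit system a 58 byte pull copies
 *
 *      ALIGN(58, sizeof(long)) == ALIGN(58, 8) == 64
 *
 * bytes, while the frag/len bookkeeping still uses 58.
 */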
1565
1566 /**
1567  * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1568  * @rx_ring: rx descriptor ring packet is being transacted on
1569  * @skb: pointer to current skb being updated
1570  *
1571  * This function provides a basic DMA sync up for the first fragment of an
1572  * skb.  The reason for doing this is that the first fragment cannot be
1573  * unmapped until we have reached the end of packet descriptor for a buffer
1574  * chain.
1575  */
1576 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1577                                 struct sk_buff *skb)
1578 {
1579         /* if the page was released unmap it, else just sync our portion */
1580         if (unlikely(IXGBE_CB(skb)->page_released)) {
1581                 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1582                                ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1583                 IXGBE_CB(skb)->page_released = false;
1584         } else {
1585                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1586
1587                 dma_sync_single_range_for_cpu(rx_ring->dev,
1588                                               IXGBE_CB(skb)->dma,
1589                                               frag->page_offset,
1590                                               ixgbe_rx_bufsz(rx_ring),
1591                                               DMA_FROM_DEVICE);
1592         }
1593         IXGBE_CB(skb)->dma = 0;
1594 }
1595
1596 /**
1597  * ixgbe_cleanup_headers - Correct corrupted or empty headers
1598  * @rx_ring: rx descriptor ring packet is being transacted on
1599  * @rx_desc: pointer to the EOP Rx descriptor
1600  * @skb: pointer to current skb being fixed
1601  *
1602  * Check for corrupted packet headers caused by senders on the local L2
1603  * embedded NIC switch not setting up their Tx Descriptors right.  These
1604  * should be very rare.
1605  *
1606  * Also address the case where we are pulling data in on pages only
1607  * and as such no data is present in the skb header.
1608  *
1609  * In addition if skb is not at least 60 bytes we need to pad it so that
1610  * it is large enough to qualify as a valid Ethernet frame.
1611  *
1612  * Returns true if an error was encountered and skb was freed.
1613  **/
1614 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1615                                   union ixgbe_adv_rx_desc *rx_desc,
1616                                   struct sk_buff *skb)
1617 {
1618         struct net_device *netdev = rx_ring->netdev;
1619
1620         /* verify that the packet does not have any known errors */
1621         if (unlikely(ixgbe_test_staterr(rx_desc,
1622                                         IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1623             !(netdev->features & NETIF_F_RXALL))) {
1624                 dev_kfree_skb_any(skb);
1625                 return true;
1626         }
1627
1628         /* place header in linear portion of buffer */
1629         if (skb_is_nonlinear(skb))
1630                 ixgbe_pull_tail(rx_ring, skb);
1631
1632 #ifdef IXGBE_FCOE
1633         /* do not attempt to pad FCoE Frames as this will disrupt DDP */
1634         if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1635                 return false;
1636
1637 #endif
1638         /* if skb_pad returns an error the skb was freed */
1639         if (unlikely(skb->len < 60)) {
1640                 int pad_len = 60 - skb->len;
1641
1642                 if (skb_pad(skb, pad_len))
1643                         return true;
1644                 __skb_put(skb, pad_len);
1645         }
1646
1647         return false;
1648 }
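
/*
 * Worked example of the padding path above, with an assumed frame: a
 * 42 byte ARP reply is below the 60 byte Ethernet minimum, so
 *
 *      pad_len = 60 - 42 = 18;
 *      skb_pad(skb, 18);       // extend the buffer; on failure skb is freed
 *      __skb_put(skb, 18);     // account the pad bytes in skb->len
 */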
1649
1650 /**
1651  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1652  * @rx_ring: rx descriptor ring to store buffers on
1653  * @old_buff: donor buffer to have page reused
1654  *
1655  * Synchronizes page for reuse by the adapter
1656  **/
1657 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1658                                 struct ixgbe_rx_buffer *old_buff)
1659 {
1660         struct ixgbe_rx_buffer *new_buff;
1661         u16 nta = rx_ring->next_to_alloc;
1662
1663         new_buff = &rx_ring->rx_buffer_info[nta];
1664
1665         /* update, and store next to alloc */
1666         nta++;
1667         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1668
1669         /* transfer page from old buffer to new buffer */
1670         new_buff->page = old_buff->page;
1671         new_buff->dma = old_buff->dma;
1672         new_buff->page_offset = old_buff->page_offset;
1673
1674         /* sync the buffer for use by the device */
1675         dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1676                                          new_buff->page_offset,
1677                                          ixgbe_rx_bufsz(rx_ring),
1678                                          DMA_FROM_DEVICE);
1679 }
1680
1681 /**
1682  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1683  * @rx_ring: rx descriptor ring to transact packets on
1684  * @rx_buffer: buffer containing page to add
1685  * @rx_desc: descriptor containing length of buffer written by hardware
1686  * @skb: sk_buff to place the data into
1687  *
1688  * This function will add the data contained in rx_buffer->page to the skb.
1689  * This is done either through a direct copy if the data in the buffer is
1690  * less than the skb header size, otherwise it will just attach the page as
1691  * a frag to the skb.
1692  *
1693  * The function will then update the page offset if necessary and return
1694  * true if the buffer can be reused by the adapter.
1695  **/
1696 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1697                               struct ixgbe_rx_buffer *rx_buffer,
1698                               union ixgbe_adv_rx_desc *rx_desc,
1699                               struct sk_buff *skb)
1700 {
1701         struct page *page = rx_buffer->page;
1702         unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1703 #if (PAGE_SIZE < 8192)
1704         unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1705 #else
1706         unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1707         unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1708                                    ixgbe_rx_bufsz(rx_ring);
1709 #endif
1710
1711         if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1712                 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1713
1714                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1715
1716                 /* we can reuse buffer as-is, just make sure it is local */
1717                 if (likely(page_to_nid(page) == numa_node_id()))
1718                         return true;
1719
1720                 /* this page cannot be reused so discard it */
1721                 put_page(page);
1722                 return false;
1723         }
1724
1725         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1726                         rx_buffer->page_offset, size, truesize);
1727
1728         /* avoid re-using remote pages */
1729         if (unlikely(page_to_nid(page) != numa_node_id()))
1730                 return false;
1731
1732 #if (PAGE_SIZE < 8192)
1733         /* if we are only owner of page we can reuse it */
1734         if (unlikely(page_count(page) != 1))
1735                 return false;
1736
1737         /* flip page offset to other buffer */
1738         rx_buffer->page_offset ^= truesize;
1739
1740         /*
1741          * since we are the only owner of the page and we need to
1742          * increment it, just set the value to 2 in order to avoid
1743          * an unnecessary locked operation
1744          */
1745         atomic_set(&page->_count, 2);
1746 #else
1747         /* move offset up to the next cache line */
1748         rx_buffer->page_offset += truesize;
1749
1750         if (rx_buffer->page_offset > last_offset)
1751                 return false;
1752
1753         /* bump ref count on page before it is given to the stack */
1754         get_page(page);
1755 #endif
1756
1757         return true;
1758 }
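
/*
 * Sketch of the PAGE_SIZE < 8192 reuse scheme above, assuming 4K pages
 * and a 2K Rx buffer size.  The page is split into two half-page
 * buffers and page_offset simply toggles between them:
 *
 *      page_offset = 0;        // hardware DMAs into the first half
 *      page_offset ^= 2048;    // now 2048, second half is armed
 *      page_offset ^= 2048;    // back to 0, first half again
 *
 * While the stack holds one half, the device can write the other, so
 * a page ping-pongs instead of being freed and reallocated per packet.
 */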
1759
1760 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1761                                              union ixgbe_adv_rx_desc *rx_desc)
1762 {
1763         struct ixgbe_rx_buffer *rx_buffer;
1764         struct sk_buff *skb;
1765         struct page *page;
1766
1767         rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1768         page = rx_buffer->page;
1769         prefetchw(page);
1770
1771         skb = rx_buffer->skb;
1772
1773         if (likely(!skb)) {
1774                 void *page_addr = page_address(page) +
1775                                   rx_buffer->page_offset;
1776
1777                 /* prefetch first cache line of first page */
1778                 prefetch(page_addr);
1779 #if L1_CACHE_BYTES < 128
1780                 prefetch(page_addr + L1_CACHE_BYTES);
1781 #endif
1782
1783                 /* allocate a skb to store the frags */
1784                 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1785                                                 IXGBE_RX_HDR_SIZE);
1786                 if (unlikely(!skb)) {
1787                         rx_ring->rx_stats.alloc_rx_buff_failed++;
1788                         return NULL;
1789                 }
1790
1791                 /*
1792                  * we will be copying header into skb->data in
1793                  * pskb_may_pull so it is in our interest to prefetch
1794                  * it now to avoid a possible cache miss
1795                  */
1796                 prefetchw(skb->data);
1797
1798                 /*
1799                  * Delay unmapping of the first packet. It carries the
1800                  * header information, HW may still access the header
1801                  * after the writeback.  Only unmap it when EOP is
1802                  * reached
1803                  */
1804                 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1805                         goto dma_sync;
1806
1807                 IXGBE_CB(skb)->dma = rx_buffer->dma;
1808         } else {
1809                 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
1810                         ixgbe_dma_sync_frag(rx_ring, skb);
1811
1812 dma_sync:
1813                 /* we are reusing so sync this buffer for CPU use */
1814                 dma_sync_single_range_for_cpu(rx_ring->dev,
1815                                               rx_buffer->dma,
1816                                               rx_buffer->page_offset,
1817                                               ixgbe_rx_bufsz(rx_ring),
1818                                               DMA_FROM_DEVICE);
1819         }
1820
1821         /* pull page into skb */
1822         if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1823                 /* hand second half of page back to the ring */
1824                 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1825         } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1826                 /* the page has been released from the ring */
1827                 IXGBE_CB(skb)->page_released = true;
1828         } else {
1829                 /* we are not reusing the buffer so unmap it */
1830                 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1831                                ixgbe_rx_pg_size(rx_ring),
1832                                DMA_FROM_DEVICE);
1833         }
1834
1835         /* clear contents of buffer_info */
1836         rx_buffer->skb = NULL;
1837         rx_buffer->dma = 0;
1838         rx_buffer->page = NULL;
1839
1840         return skb;
1841 }
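
/*
 * The three-way exit above covers the page life cycle:
 *
 *      ixgbe_add_rx_frag() true      half page handed back to the ring
 *      IXGBE_CB(skb)->dma == dma     first buffer of a chain; unmap is
 *                                    deferred until the EOP descriptor
 *      otherwise                     page unmapped here, stack owns it
 */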
1842
1843 /**
1844  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buffer
1845  * @q_vector: structure containing interrupt and ring information
1846  * @rx_ring: rx descriptor ring to transact packets on
1847  * @budget: Total limit on number of packets to process
1848  *
1849  * This function provides a "bounce buffer" approach to Rx interrupt
1850  * processing.  The advantage to this is that on systems that have
1851  * expensive overhead for IOMMU access this provides a means of avoiding
1852  * it by maintaining the mapping of the page to the system.
1853  *
1854  * Returns true if all work is completed without reaching budget
1855  **/
1856 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1857                                struct ixgbe_ring *rx_ring,
1858                                const int budget)
1859 {
1860         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1861 #ifdef IXGBE_FCOE
1862         struct ixgbe_adapter *adapter = q_vector->adapter;
1863         int ddp_bytes;
1864         unsigned int mss = 0;
1865 #endif /* IXGBE_FCOE */
1866         u16 cleaned_count = ixgbe_desc_unused(rx_ring);
1867
1868         do {
1869                 union ixgbe_adv_rx_desc *rx_desc;
1870                 struct sk_buff *skb;
1871
1872                 /* return some buffers to hardware, one at a time is too slow */
1873                 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1874                         ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1875                         cleaned_count = 0;
1876                 }
1877
1878                 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1879
1880                 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
1881                         break;
1882
1883                 /*
1884                  * This memory barrier is needed to keep us from reading
1885                  * any other fields out of the rx_desc until we know the
1886                  * RXD_STAT_DD bit is set
1887                  */
1888                 rmb();
1889
1890                 /* retrieve a buffer from the ring */
1891                 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
1892
1893                 /* exit if we failed to retrieve a buffer */
1894                 if (!skb)
1895                         break;
1896
1897                 cleaned_count++;
1898
1899                 /* place incomplete frames back on ring for completion */
1900                 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
1901                         continue;
1902
1903                 /* verify the packet layout is correct */
1904                 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
1905                         continue;
1906
1907                 /* probably a little skewed due to removing CRC */
1908                 total_rx_bytes += skb->len;
1909
1910                 /* populate checksum, timestamp, VLAN, and protocol */
1911                 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
1912
1913 #ifdef IXGBE_FCOE
1914                 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1915                 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
1916                         ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1917                         /* include DDPed FCoE data */
1918                         if (ddp_bytes > 0) {
1919                                 if (!mss) {
1920                                         mss = rx_ring->netdev->mtu -
1921                                                 sizeof(struct fcoe_hdr) -
1922                                                 sizeof(struct fc_frame_header) -
1923                                                 sizeof(struct fcoe_crc_eof);
1924                                         if (mss > 512)
1925                                                 mss &= ~511; /* round down to a 512B multiple */
1926                                 }
1927                                 total_rx_bytes += ddp_bytes;
1928                                 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
1929                                                                  mss);
1930                         }
1931                         if (!ddp_bytes) {
1932                                 dev_kfree_skb_any(skb);
1933                                 continue;
1934                         }
1935                 }
1936
1937 #endif /* IXGBE_FCOE */
1938                 ixgbe_rx_skb(q_vector, skb);
1939
1940                 /* update budget accounting */
1941                 total_rx_packets++;
1942         } while (likely(total_rx_packets < budget));
1943
1944         u64_stats_update_begin(&rx_ring->syncp);
1945         rx_ring->stats.packets += total_rx_packets;
1946         rx_ring->stats.bytes += total_rx_bytes;
1947         u64_stats_update_end(&rx_ring->syncp);
1948         q_vector->rx.total_packets += total_rx_packets;
1949         q_vector->rx.total_bytes += total_rx_bytes;
1950
1951         if (cleaned_count)
1952                 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1953
1954         return (total_rx_packets < budget);
1955 }
1956
1957 /**
1958  * ixgbe_configure_msix - Configure MSI-X hardware
1959  * @adapter: board private structure
1960  *
1961  * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
1962  * interrupts.
1963  **/
1964 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1965 {
1966         struct ixgbe_q_vector *q_vector;
1967         int v_idx;
1968         u32 mask;
1969
1970         /* Populate MSIX to EITR Select */
1971         if (adapter->num_vfs > 32) {
1972                 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1973                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1974         }
1975
1976         /*
1977          * Populate the IVAR table and set the ITR values to the
1978          * corresponding register.
1979          */
1980         for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1981                 struct ixgbe_ring *ring;
1982                 q_vector = adapter->q_vector[v_idx];
1983
1984                 ixgbe_for_each_ring(ring, q_vector->rx)
1985                         ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1986
1987                 ixgbe_for_each_ring(ring, q_vector->tx)
1988                         ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1989
1990                 ixgbe_write_eitr(q_vector);
1991         }
1992
1993         switch (adapter->hw.mac.type) {
1994         case ixgbe_mac_82598EB:
1995                 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1996                                v_idx);
1997                 break;
1998         case ixgbe_mac_82599EB:
1999         case ixgbe_mac_X540:
2000                 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2001                 break;
2002         default:
2003                 break;
2004         }
2005         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); /* ~488 usecs */
2006
2007         /* set up to autoclear timer, and the vectors */
2008         mask = IXGBE_EIMS_ENABLE_MASK;
2009         mask &= ~(IXGBE_EIMS_OTHER |
2010                   IXGBE_EIMS_MAILBOX |
2011                   IXGBE_EIMS_LSC);
2012
2013         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2014 }
2015
2016 enum latency_range {
2017         lowest_latency = 0,
2018         low_latency = 1,
2019         bulk_latency = 2,
2020         latency_invalid = 255
2021 };
2022
2023 /**
2024  * ixgbe_update_itr - update the dynamic ITR value based on statistics
2025  * @q_vector: structure containing interrupt and ring information
2026  * @ring_container: structure containing ring performance data
2027  *
2028  *      Stores a new ITR value based on packets and byte
2029  *      counts during the last interrupt.  The advantage of per interrupt
2030  *      computation is faster updates and more accurate ITR for the current
2031  *      traffic pattern.  Constants in this function were computed
2032  *      based on theoretical maximum wire speed and thresholds were set based
2033  *      on testing data as well as attempting to minimize response time
2034  *      while increasing bulk throughput.
2035  *      this functionality is controlled by the InterruptThrottleRate module
2036  *      This functionality is controlled by the InterruptThrottleRate module
2037  **/
2038 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2039                              struct ixgbe_ring_container *ring_container)
2040 {
2041         int bytes = ring_container->total_bytes;
2042         int packets = ring_container->total_packets;
2043         u32 timepassed_us;
2044         u64 bytes_perint;
2045         u8 itr_setting = ring_container->itr;
2046
2047         if (packets == 0)
2048                 return;
2049
2050         /* simple throttlerate management
2051          *   0-10MB/s   lowest (100000 ints/s)
2052          *  10-20MB/s   low    (20000 ints/s)
2053          *  20-1249MB/s bulk   (8000 ints/s)
2054          */
2055         /* what was last interrupt timeslice? */
2056         timepassed_us = q_vector->itr >> 2;
2057         bytes_perint = bytes / timepassed_us; /* bytes/usec */
2058
2059         switch (itr_setting) {
2060         case lowest_latency:
2061                 if (bytes_perint > 10)
2062                         itr_setting = low_latency;
2063                 break;
2064         case low_latency:
2065                 if (bytes_perint > 20)
2066                         itr_setting = bulk_latency;
2067                 else if (bytes_perint <= 10)
2068                         itr_setting = lowest_latency;
2069                 break;
2070         case bulk_latency:
2071                 if (bytes_perint <= 20)
2072                         itr_setting = low_latency;
2073                 break;
2074         }
2075
2076         /* clear work counters since we have the values we need */
2077         ring_container->total_bytes = 0;
2078         ring_container->total_packets = 0;
2079
2080         /* write updated itr to ring container */
2081         ring_container->itr = itr_setting;
2082 }
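
/*
 * Worked example for the classifier above, assuming the driver's EITR
 * encoding (itr >> 2 is the interval in usecs) and IXGBE_20K_ITR == 200.
 * The timeslice is then 50 usecs; if 3000 bytes arrived in that window:
 *
 *      bytes_perint = 3000 / 50 = 60   (bytes/usec, ~60 MB/s)
 *
 * which is > 20, so a low_latency ring is promoted to bulk_latency.
 */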
2083
2084 /**
2085  * ixgbe_write_eitr - write EITR register in hardware specific way
2086  * @q_vector: structure containing interrupt and ring information
2087  *
2088  * This function is made to be called by ethtool and by the driver
2089  * when it needs to update EITR registers at runtime.  Hardware
2090  * specific quirks/differences are taken care of here.
2091  */
2092 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2093 {
2094         struct ixgbe_adapter *adapter = q_vector->adapter;
2095         struct ixgbe_hw *hw = &adapter->hw;
2096         int v_idx = q_vector->v_idx;
2097         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2098
2099         switch (adapter->hw.mac.type) {
2100         case ixgbe_mac_82598EB:
2101                 /* must write high and low 16 bits to reset counter */
2102                 itr_reg |= (itr_reg << 16);
2103                 break;
2104         case ixgbe_mac_82599EB:
2105         case ixgbe_mac_X540:
2106                 /*
2107                  * set the WDIS bit to not clear the timer bits and cause an
2108                  * immediate assertion of the interrupt
2109                  */
2110                 itr_reg |= IXGBE_EITR_CNT_WDIS;
2111                 break;
2112         default:
2113                 break;
2114         }
2115         IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2116 }
2117
2118 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2119 {
2120         u32 new_itr = q_vector->itr;
2121         u8 current_itr;
2122
2123         ixgbe_update_itr(q_vector, &q_vector->tx);
2124         ixgbe_update_itr(q_vector, &q_vector->rx);
2125
2126         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2127
2128         switch (current_itr) {
2129         /* counts and packets in update_itr are dependent on these numbers */
2130         case lowest_latency:
2131                 new_itr = IXGBE_100K_ITR;
2132                 break;
2133         case low_latency:
2134                 new_itr = IXGBE_20K_ITR;
2135                 break;
2136         case bulk_latency:
2137                 new_itr = IXGBE_8K_ITR;
2138                 break;
2139         default:
2140                 break;
2141         }
2142
2143         if (new_itr != q_vector->itr) {
2144                 /* do an exponential smoothing */
2145                 new_itr = (10 * new_itr * q_vector->itr) /
2146                           ((9 * new_itr) + q_vector->itr);
2147
2148                 /* save the algorithm value here */
2149                 q_vector->itr = new_itr;
2150
2151                 ixgbe_write_eitr(q_vector);
2152         }
2153 }
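
/*
 * Example numbers for the exponential smoothing above (illustrative,
 * assuming the usual register values IXGBE_8K_ITR == 500 and
 * IXGBE_100K_ITR == 40).  Moving from bulk toward lowest latency:
 *
 *      new_itr = (10 * 40 * 500) / ((9 * 40) + 500)
 *              = 200000 / 860 = 232
 *
 * so the vector steps to ~232 first instead of jumping straight to 40,
 * damping oscillation when traffic alternates between patterns.
 */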
2154
2155 /**
2156  * ixgbe_check_overtemp_subtask - check for over temperature
2157  * @adapter: pointer to adapter
2158  **/
2159 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2160 {
2161         struct ixgbe_hw *hw = &adapter->hw;
2162         u32 eicr = adapter->interrupt_event;
2163
2164         if (test_bit(__IXGBE_DOWN, &adapter->state))
2165                 return;
2166
2167         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2168             !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2169                 return;
2170
2171         adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2172
2173         switch (hw->device_id) {
2174         case IXGBE_DEV_ID_82599_T3_LOM:
2175                 /*
2176                  * Since the warning interrupt is for both ports
2177                  * we have to check whether:
2178                  *  - this interrupt was actually for our port, and
2179                  *  - we may have missed the interrupt, so we always have to
2180                  *    check if we got an LSC
2181                  */
2182                 if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
2183                     !(eicr & IXGBE_EICR_LSC))
2184                         return;
2185
2186                 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2187                         u32 autoneg;
2188                         bool link_up = false;
2189
2190                         hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2191
2192                         if (link_up)
2193                                 return;
2194                 }
2195
2196                 /* Check if this is not due to overtemp */
2197                 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2198                         return;
2199
2200                 break;
2201         default:
2202                 if (!(eicr & IXGBE_EICR_GPI_SDP0))
2203                         return;
2204                 break;
2205         }
2206         e_crit(drv,
2207                "Network adapter has been stopped because it has overheated. "
2208                "Restart the computer. If the problem persists, "
2209                "power off the system and replace the adapter\n");
2210
2211         adapter->interrupt_event = 0;
2212 }
2213
2214 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2215 {
2216         struct ixgbe_hw *hw = &adapter->hw;
2217
2218         if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2219             (eicr & IXGBE_EICR_GPI_SDP1)) {
2220                 e_crit(probe, "Fan has stopped, replace the adapter\n");
2221                 /* write to clear the interrupt */
2222                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2223         }
2224 }
2225
2226 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2227 {
2228         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2229                 return;
2230
2231         switch (adapter->hw.mac.type) {
2232         case ixgbe_mac_82599EB:
2233                 /*
2234                  * Need to check link state, so complete the overtemp check
2235                  * in the service task
2236                  */
2237                 if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
2238                     (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2239                         adapter->interrupt_event = eicr;
2240                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2241                         ixgbe_service_event_schedule(adapter);
2242                         return;
2243                 }
2244                 return;
2245         case ixgbe_mac_X540:
2246                 if (!(eicr & IXGBE_EICR_TS))
2247                         return;
2248                 break;
2249         default:
2250                 return;
2251         }
2252
2253         e_crit(drv,
2254                "Network adapter has been stopped because it has overheated. "
2255                "Restart the computer. If the problem persists, "
2256                "power off the system and replace the adapter\n");
2257 }
2258
2259 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2260 {
2261         struct ixgbe_hw *hw = &adapter->hw;
2262
2263         if (eicr & IXGBE_EICR_GPI_SDP2) {
2264                 /* Clear the interrupt */
2265                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2266                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2267                         adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2268                         ixgbe_service_event_schedule(adapter);
2269                 }
2270         }
2271
2272         if (eicr & IXGBE_EICR_GPI_SDP1) {
2273                 /* Clear the interrupt */
2274                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2275                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2276                         adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2277                         ixgbe_service_event_schedule(adapter);
2278                 }
2279         }
2280 }
2281
2282 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2283 {
2284         struct ixgbe_hw *hw = &adapter->hw;
2285
2286         adapter->lsc_int++;
2287         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2288         adapter->link_check_timeout = jiffies;
2289         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2290                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2291                 IXGBE_WRITE_FLUSH(hw);
2292                 ixgbe_service_event_schedule(adapter);
2293         }
2294 }
2295
2296 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2297                                            u64 qmask)
2298 {
2299         u32 mask;
2300         struct ixgbe_hw *hw = &adapter->hw;
2301
2302         switch (hw->mac.type) {
2303         case ixgbe_mac_82598EB:
2304                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2305                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2306                 break;
2307         case ixgbe_mac_82599EB:
2308         case ixgbe_mac_X540:
2309                 mask = (qmask & 0xFFFFFFFF);
2310                 if (mask)
2311                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2312                 mask = (qmask >> 32);
2313                 if (mask)
2314                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2315                 break;
2316         default:
2317                 break;
2318         }
2319         /* skip the flush */
2320 }
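
/*
 * Example of the 64-bit qmask split above (hypothetical mask).  For a
 * mask covering q_vectors 0 and 35:
 *
 *      qmask = (1ULL << 35) | (1ULL << 0);
 *      EIMS_EX(0) gets qmask & 0xFFFFFFFF      (bit 0 set)
 *      EIMS_EX(1) gets qmask >> 32             (bit 3 set, vector 35)
 */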
2321
2322 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2323                                             u64 qmask)
2324 {
2325         u32 mask;
2326         struct ixgbe_hw *hw = &adapter->hw;
2327
2328         switch (hw->mac.type) {
2329         case ixgbe_mac_82598EB:
2330                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2331                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2332                 break;
2333         case ixgbe_mac_82599EB:
2334         case ixgbe_mac_X540:
2335                 mask = (qmask & 0xFFFFFFFF);
2336                 if (mask)
2337                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2338                 mask = (qmask >> 32);
2339                 if (mask)
2340                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2341                 break;
2342         default:
2343                 break;
2344         }
2345         /* skip the flush */
2346 }
2347
2348 /**
2349  * ixgbe_irq_enable - Enable default interrupt generation settings
2350  * @adapter: board private structure
 * @queues: also enable the per-queue interrupt vectors
 * @flush: flush the register write
2351  **/
2352 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2353                                     bool flush)
2354 {
2355         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2356
2357         /* don't reenable LSC while waiting for link */
2358         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2359                 mask &= ~IXGBE_EIMS_LSC;
2360
2361         if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2362                 switch (adapter->hw.mac.type) {
2363                 case ixgbe_mac_82599EB:
2364                         mask |= IXGBE_EIMS_GPI_SDP0;
2365                         break;
2366                 case ixgbe_mac_X540:
2367                         mask |= IXGBE_EIMS_TS;
2368                         break;
2369                 default:
2370                         break;
2371                 }
2372         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2373                 mask |= IXGBE_EIMS_GPI_SDP1;
2374         switch (adapter->hw.mac.type) {
2375         case ixgbe_mac_82599EB:
2376                 mask |= IXGBE_EIMS_GPI_SDP1;
2377                 mask |= IXGBE_EIMS_GPI_SDP2; /* fall through */
2378         case ixgbe_mac_X540:
2379                 mask |= IXGBE_EIMS_ECC;
2380                 mask |= IXGBE_EIMS_MAILBOX;
2381                 break;
2382         default:
2383                 break;
2384         }
2385
2386         if (adapter->hw.mac.type == ixgbe_mac_X540)
2387                 mask |= IXGBE_EIMS_TIMESYNC;
2388
2389         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2390             !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2391                 mask |= IXGBE_EIMS_FLOW_DIR;
2392
2393         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2394         if (queues)
2395                 ixgbe_irq_enable_queues(adapter, ~0);
2396         if (flush)
2397                 IXGBE_WRITE_FLUSH(&adapter->hw);
2398 }
2399
2400 static irqreturn_t ixgbe_msix_other(int irq, void *data)
2401 {
2402         struct ixgbe_adapter *adapter = data;
2403         struct ixgbe_hw *hw = &adapter->hw;
2404         u32 eicr;
2405
2406         /*
2407          * Workaround for Silicon errata.  Use clear-by-write instead
2408          * of clear-by-read.  Reading with EICS will return the
2409          * interrupt causes without clearing, which later be done
2410          * interrupt causes without clearing, which will later be done
2411          */
2412         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2413         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2414
2415         if (eicr & IXGBE_EICR_LSC)
2416                 ixgbe_check_lsc(adapter);
2417
2418         if (eicr & IXGBE_EICR_MAILBOX)
2419                 ixgbe_msg_task(adapter);
2420
2421         switch (hw->mac.type) {
2422         case ixgbe_mac_82599EB:
2423         case ixgbe_mac_X540:
2424                 if (eicr & IXGBE_EICR_ECC)
2425                         e_info(link, "Received unrecoverable ECC Err, please "
2426                                "reboot\n");
2427                 /* Handle Flow Director Full threshold interrupt */
2428                 if (eicr & IXGBE_EICR_FLOW_DIR) {
2429                         int reinit_count = 0;
2430                         int i;
2431                         for (i = 0; i < adapter->num_tx_queues; i++) {
2432                                 struct ixgbe_ring *ring = adapter->tx_ring[i];
2433                                 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2434                                                        &ring->state))
2435                                         reinit_count++;
2436                         }
2437                         if (reinit_count) {
2438                                 /* no more flow director interrupts until after init */
2439                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2440                                 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2441                                 ixgbe_service_event_schedule(adapter);
2442                         }
2443                 }
2444                 ixgbe_check_sfp_event(adapter, eicr);
2445                 ixgbe_check_overtemp_event(adapter, eicr);
2446                 break;
2447         default:
2448                 break;
2449         }
2450
2451         ixgbe_check_fan_failure(adapter, eicr);
2452
2453         if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2454                 ixgbe_ptp_check_pps_event(adapter, eicr);
2455
2456         /* re-enable the original interrupt state, no lsc, no queues */
2457         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2458                 ixgbe_irq_enable(adapter, false, false);
2459
2460         return IRQ_HANDLED;
2461 }
2462
2463 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2464 {
2465         struct ixgbe_q_vector *q_vector = data;
2466
2467         /* EIAM disabled interrupts (on this vector) for us */
2468
2469         if (q_vector->rx.ring || q_vector->tx.ring)
2470                 napi_schedule(&q_vector->napi);
2471
2472         return IRQ_HANDLED;
2473 }
2474
2475 /**
2476  * ixgbe_poll - NAPI Rx polling callback
2477  * @napi: structure for representing this polling device
2478  * @budget: how many packets driver is allowed to clean
2479  *
2480  * This function is used for legacy and MSI, NAPI mode
2481  **/
2482 int ixgbe_poll(struct napi_struct *napi, int budget)
2483 {
2484         struct ixgbe_q_vector *q_vector =
2485                                 container_of(napi, struct ixgbe_q_vector, napi);
2486         struct ixgbe_adapter *adapter = q_vector->adapter;
2487         struct ixgbe_ring *ring;
2488         int per_ring_budget;
2489         bool clean_complete = true;
2490
2491 #ifdef CONFIG_IXGBE_DCA
2492         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2493                 ixgbe_update_dca(q_vector);
2494 #endif
2495
2496         ixgbe_for_each_ring(ring, q_vector->tx)
2497                 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2498
2499         /* attempt to distribute budget to each queue fairly, but don't allow
2500          * the budget to go below 1 because we'll exit polling */
2501         if (q_vector->rx.count > 1)
2502                 per_ring_budget = max(budget/q_vector->rx.count, 1);
2503         else
2504                 per_ring_budget = budget;
2505
2506         ixgbe_for_each_ring(ring, q_vector->rx)
2507                 clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
2508                                                      per_ring_budget);
2509
2510         /* If all work not completed, return budget and keep polling */
2511         if (!clean_complete)
2512                 return budget;
2513
2514         /* all work done, exit the polling mode */
2515         napi_complete(napi);
2516         if (adapter->rx_itr_setting & 1)
2517                 ixgbe_set_itr(q_vector);
2518         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2519                 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2520
2521         return 0;
2522 }
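
/*
 * Example of the budget split above, with assumed numbers: for
 * budget == 64 and q_vector->rx.count == 3 rings,
 *
 *      per_ring_budget = max(64 / 3, 1) = 21
 *
 * so each ring may clean up to 21 packets this poll; the max() keeps
 * the per-ring budget from rounding down to 0 when rings outnumber
 * the budget.
 */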
2523
2524 /**
2525  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2526  * @adapter: board private structure
2527  *
2528  * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2529  * interrupts from the kernel.
2530  **/
2531 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2532 {
2533         struct net_device *netdev = adapter->netdev;
2534         int vector, err;
2535         int ri = 0, ti = 0;
2536
2537         for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2538                 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2539                 struct msix_entry *entry = &adapter->msix_entries[vector];
2540
2541                 if (q_vector->tx.ring && q_vector->rx.ring) {
2542                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2543                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
2544                         ti++;
2545                 } else if (q_vector->rx.ring) {
2546                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2547                                  "%s-%s-%d", netdev->name, "rx", ri++);
2548                 } else if (q_vector->tx.ring) {
2549                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2550                                  "%s-%s-%d", netdev->name, "tx", ti++);
2551                 } else {
2552                         /* skip this unused q_vector */
2553                         continue;
2554                 }
2555                 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2556                                   q_vector->name, q_vector);
2557                 if (err) {
2558                         e_err(probe, "request_irq failed for MSIX interrupt. "
2559                               "Error: %d\n", err);
2560                         goto free_queue_irqs;
2561                 }
2562                 /* If Flow Director is enabled, set interrupt affinity */
2563                 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2564                         /* assign the mask for this irq */
2565                         irq_set_affinity_hint(entry->vector,
2566                                               &q_vector->affinity_mask);
2567                 }
2568         }
2569
2570         err = request_irq(adapter->msix_entries[vector].vector,
2571                           ixgbe_msix_other, 0, netdev->name, adapter);
2572         if (err) {
2573                 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2574                 goto free_queue_irqs;
2575         }
2576
2577         return 0;
2578
2579 free_queue_irqs:
2580         while (vector) {
2581                 vector--;
2582                 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2583                                       NULL);
2584                 free_irq(adapter->msix_entries[vector].vector,
2585                          adapter->q_vector[vector]);
2586         }
2587         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2588         pci_disable_msix(adapter->pdev);
2589         kfree(adapter->msix_entries);
2590         adapter->msix_entries = NULL;
2591         return err;
2592 }
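
/*
 * With the naming scheme above, a netdev called eth0 (hypothetical)
 * with two combined vectors plus the "other" vector would register
 * IRQs named roughly:
 *
 *      eth0-TxRx-0
 *      eth0-TxRx-1
 *      eth0
 */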
2593
2594 /**
2595  * ixgbe_intr - legacy mode Interrupt Handler
2596  * @irq: interrupt number
2597  * @data: pointer to a network interface device structure
2598  **/
2599 static irqreturn_t ixgbe_intr(int irq, void *data)
2600 {
2601         struct ixgbe_adapter *adapter = data;
2602         struct ixgbe_hw *hw = &adapter->hw;
2603         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2604         u32 eicr;
2605
2606         /*
2607          * Workaround for silicon errata #26 on 82598.  Mask the interrupt
2608          * before the read of EICR.
2609          */
2610         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2611
2612         /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
2613          * therefore no explicit interrupt disable is necessary */
2614         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2615         if (!eicr) {
2616                 /*
2617                  * Shared interrupt alert!
2618                  * Make sure interrupts are enabled, because the read will
2619                  * have disabled them due to EIAM.  Finish the workaround
2620                  * for the silicon errata on 82598 by unmasking the
2621                  * interrupt that we masked before the EICR read.
2622                  */
2623                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2624                         ixgbe_irq_enable(adapter, true, true);
2625                 return IRQ_NONE;        /* Not our interrupt */
2626         }
2627
2628         if (eicr & IXGBE_EICR_LSC)
2629                 ixgbe_check_lsc(adapter);
2630
2631         switch (hw->mac.type) {
2632         case ixgbe_mac_82599EB:
2633                 ixgbe_check_sfp_event(adapter, eicr);
2634                 /* Fall through */
2635         case ixgbe_mac_X540:
2636                 if (eicr & IXGBE_EICR_ECC)
2637                         e_info(link, "Received unrecoverable ECC err, please "
2638                                      "reboot\n");
2639                 ixgbe_check_overtemp_event(adapter, eicr);
2640                 break;
2641         default:
2642                 break;
2643         }
2644
2645         ixgbe_check_fan_failure(adapter, eicr);
2646         if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2647                 ixgbe_ptp_check_pps_event(adapter, eicr);
2648
2649         /* would disable interrupts here but EIAM disabled it */
2650         napi_schedule(&q_vector->napi);
2651
2652         /*
2653          * re-enable link(maybe) and non-queue interrupts, no flush.
2654          * ixgbe_poll will re-enable the queue interrupts
2655          */
2656         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2657                 ixgbe_irq_enable(adapter, false, false);
2658
2659         return IRQ_HANDLED;
2660 }
2661
2662 /**
2663  * ixgbe_request_irq - initialize interrupts
2664  * @adapter: board private structure
2665  *
2666  * Attempts to configure interrupts using the best available
2667  * capabilities of the hardware and kernel.
2668  **/
2669 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2670 {
2671         struct net_device *netdev = adapter->netdev;
2672         int err;
2673
2674         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2675                 err = ixgbe_request_msix_irqs(adapter);
2676         else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2677                 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2678                                   netdev->name, adapter);
2679         else
2680                 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2681                                   netdev->name, adapter);
2682
2683         if (err)
2684                 e_err(probe, "request_irq failed, Error %d\n", err);
2685
2686         return err;
2687 }
2688
2689 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2690 {
2691         int vector;
2692
2693         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2694                 free_irq(adapter->pdev->irq, adapter);
2695                 return;
2696         }
2697
2698         for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2699                 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2700                 struct msix_entry *entry = &adapter->msix_entries[vector];
2701
2702                 /* free only the irqs that were actually requested */
2703                 if (!q_vector->rx.ring && !q_vector->tx.ring)
2704                         continue;
2705
2706                 /* clear the affinity_mask in the IRQ descriptor */
2707                 irq_set_affinity_hint(entry->vector, NULL);
2708
2709                 free_irq(entry->vector, q_vector);
2710         }
2711
2712         free_irq(adapter->msix_entries[vector].vector, adapter);
2713 }
2714
2715 /**
2716  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2717  * @adapter: board private structure
2718  **/
2719 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2720 {
2721         switch (adapter->hw.mac.type) {
2722         case ixgbe_mac_82598EB:
2723                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2724                 break;
2725         case ixgbe_mac_82599EB:
2726         case ixgbe_mac_X540:
2727                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2728                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2729                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2730                 break;
2731         default:
2732                 break;
2733         }
2734         IXGBE_WRITE_FLUSH(&adapter->hw);
2735         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2736                 int vector;
2737
2738                 for (vector = 0; vector < adapter->num_q_vectors; vector++)
2739                         synchronize_irq(adapter->msix_entries[vector].vector);
2740
2741                 synchronize_irq(adapter->msix_entries[vector].vector);
2742         } else {
2743                 synchronize_irq(adapter->pdev->irq);
2744         }
2745 }
2746
2747 /**
2748  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2749  * @adapter: board private structure
2750  **/
2751 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2752 {
2753         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2754
2755         ixgbe_write_eitr(q_vector);
2756
2757         ixgbe_set_ivar(adapter, 0, 0, 0);
2758         ixgbe_set_ivar(adapter, 1, 0, 0);
2759
2760         e_info(hw, "Legacy interrupt IVAR setup done\n");
2761 }
2762
2763 /**
2764  * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2765  * @adapter: board private structure
2766  * @ring: structure containing ring specific data
2767  *
2768  * Configure the Tx descriptor ring after a reset.
2769  **/
2770 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2771                              struct ixgbe_ring *ring)
2772 {
2773         struct ixgbe_hw *hw = &adapter->hw;
2774         u64 tdba = ring->dma;
2775         int wait_loop = 10;
2776         u32 txdctl = IXGBE_TXDCTL_ENABLE;
2777         u8 reg_idx = ring->reg_idx;
2778
2779         /* disable queue to avoid issues while updating state */
2780         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
2781         IXGBE_WRITE_FLUSH(hw);
2782
2783         IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2784                         (tdba & DMA_BIT_MASK(32)));
2785         IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2786         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2787                         ring->count * sizeof(union ixgbe_adv_tx_desc));
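        /*
         * Worked example (illustrative, assuming the driver default of
         * IXGBE_DEFAULT_TXD == 512 descriptors and 16-byte advanced Tx
         * descriptors): TDLEN above is programmed to 512 * 16 = 8192 bytes.
         */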
2788         IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2789         IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2790         ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2791
2792         /*
2793          * set WTHRESH to encourage burst writeback; it should not be set
2794          * higher than 1 when ITR is 0, as that could cause false TX hangs
2795          *
2796          * To avoid issues, WTHRESH + PTHRESH should always be less than
2797          * or equal to the number of on-chip descriptors, which is
2798          * currently 40
2799          */
2800         if (!ring->q_vector || (ring->q_vector->itr < 8))
2801                 txdctl |= (1 << 16);    /* WTHRESH = 1 */
2802         else
2803                 txdctl |= (8 << 16);    /* WTHRESH = 8 */
2804
2805         /*
2806          * Setting PTHRESH to 32 both improves performance
2807          * and avoids a TX hang with DFP enabled
2808          */
2809         txdctl |= (1 << 8) |    /* HTHRESH = 1 */
2810                    32;          /* PTHRESH = 32 */
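        /*
         * Sketch of the resulting TXDCTL value (field positions per the
         * 82599 datasheet: PTHRESH bits 6:0, HTHRESH bits 14:8, WTHRESH
         * bits 22:16):
         *   ITR < 8:  ENABLE | (1 << 16) | (1 << 8) | 32
         *   ITR >= 8: ENABLE | (8 << 16) | (1 << 8) | 32
         */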
2811
2812         /* reinitialize flowdirector state */
2813         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2814                 ring->atr_sample_rate = adapter->atr_sample_rate;
2815                 ring->atr_count = 0;
2816                 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2817         } else {
2818                 ring->atr_sample_rate = 0;
2819         }
2820
2821         clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2822
2823         /* enable queue */
2824         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2825
2826         /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2827         if (hw->mac.type == ixgbe_mac_82598EB &&
2828             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2829                 return;
2830
2831         /* poll to verify queue is enabled */
2832         do {
2833                 usleep_range(1000, 2000);
2834                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2835         } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2836         if (!wait_loop)
2837                 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2838 }
2839
2840 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2841 {
2842         struct ixgbe_hw *hw = &adapter->hw;
2843         u32 rttdcs, mtqc;
2844         u8 tcs = netdev_get_num_tc(adapter->netdev);
2845
2846         if (hw->mac.type == ixgbe_mac_82598EB)
2847                 return;
2848
2849         /* disable the arbiter while setting MTQC */
2850         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2851         rttdcs |= IXGBE_RTTDCS_ARBDIS;
2852         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2853
2854         /* set transmit pool layout */
2855         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2856                 mtqc = IXGBE_MTQC_VT_ENA;
2857                 if (tcs > 4)
2858                         mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2859                 else if (tcs > 1)
2860                         mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2861                 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2862                         mtqc |= IXGBE_MTQC_32VF;
2863                 else
2864                         mtqc |= IXGBE_MTQC_64VF;
2865         } else {
2866                 if (tcs > 4)
2867                         mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2868                 else if (tcs > 1)
2869                         mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2870                 else
2871                         mtqc = IXGBE_MTQC_64Q_1PB;
2872         }
2873
2874         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
2875
2876         /* Enable Security TX Buffer IFG for multiple pb */
2877         if (tcs) {
2878                 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2879                 sectx |= IXGBE_SECTX_DCB;
2880                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
2881         }
2882
2883         /* re-enable the arbiter */
2884         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2885         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2886 }
2887
2888 /**
2889  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2890  * @adapter: board private structure
2891  *
2892  * Configure the Tx unit of the MAC after a reset.
2893  **/
2894 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2895 {
2896         struct ixgbe_hw *hw = &adapter->hw;
2897         u32 dmatxctl;
2898         u32 i;
2899
2900         ixgbe_setup_mtqc(adapter);
2901
2902         if (hw->mac.type != ixgbe_mac_82598EB) {
2903                 /* DMATXCTL.EN must be before Tx queues are enabled */
2904                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2905                 dmatxctl |= IXGBE_DMATXCTL_TE;
2906                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2907         }
2908
2909         /* Setup the HW Tx Head and Tail descriptor pointers */
2910         for (i = 0; i < adapter->num_tx_queues; i++)
2911                 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2912 }
2913
2914 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
2915                                  struct ixgbe_ring *ring)
2916 {
2917         struct ixgbe_hw *hw = &adapter->hw;
2918         u8 reg_idx = ring->reg_idx;
2919         u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2920
2921         srrctl |= IXGBE_SRRCTL_DROP_EN;
2922
2923         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2924 }
2925
2926 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
2927                                   struct ixgbe_ring *ring)
2928 {
2929         struct ixgbe_hw *hw = &adapter->hw;
2930         u8 reg_idx = ring->reg_idx;
2931         u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2932
2933         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2934
2935         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2936 }
2937
2938 #ifdef CONFIG_IXGBE_DCB
2939 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2940 #else
2941 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2942 #endif
2943 {
2944         int i;
2945         bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
2946
2947         if (adapter->ixgbe_ieee_pfc)
2948                 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
2949
2950         /*
2951          * We should set the drop enable bit if:
2952          *  SR-IOV is enabled
2953          *   or
2954          *  Number of Rx queues > 1 and flow control is disabled
2955          *
2956          *  This allows us to avoid head of line blocking for security
2957          *  and performance reasons.
2958          */
2959         if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
2960             !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
2961                 for (i = 0; i < adapter->num_rx_queues; i++)
2962                         ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
2963         } else {
2964                 for (i = 0; i < adapter->num_rx_queues; i++)
2965                         ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
2966         }
2967 }
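/*
 * Illustrative note on the policy above: with SR-IOV active (num_vfs != 0)
 * SRRCTL.DROP_EN is set on every Rx ring unconditionally, while a
 * single-queue adapter without SR-IOV always has it cleared, regardless of
 * the flow control mode.
 */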
2968
2969 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2970
2971 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2972                                    struct ixgbe_ring *rx_ring)
2973 {
2974         struct ixgbe_hw *hw = &adapter->hw;
2975         u32 srrctl;
2976         u8 reg_idx = rx_ring->reg_idx;
2977
2978         if (hw->mac.type == ixgbe_mac_82598EB) {
2979                 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
2980
2981                 /*
2982                  * if VMDq is not active we must program one srrctl register
2983                  * per RSS queue since we have enabled RDRXCTL.MVMEN
2984                  */
2985                 reg_idx &= mask;
2986         }
2987
2988         /* configure header buffer length, needed for RSC */
2989         srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
2990
2991         /* configure the packet buffer length */
2992         srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2993
2994         /* configure descriptor type */
2995         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2996
2997         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2998 }
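/*
 * Worked example for the SRRCTL value above, assuming IXGBE_RX_HDR_SIZE is
 * 256 bytes, IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 (1 KB units) and
 * ixgbe_rx_bufsz() returns a 2 KB buffer:
 *   srrctl = (256 << 2) | (2048 >> 10) | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF
 *          = 0x400 | 0x2 | the one-buffer descriptor type bits
 */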
2999
3000 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3001 {
3002         struct ixgbe_hw *hw = &adapter->hw;
3003         static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
3004                           0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
3005                           0x6A3E67EA, 0x14364D17, 0x3BED200D};
3006         u32 mrqc = 0, reta = 0;
3007         u32 rxcsum;
3008         int i, j;
3009         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3010
3011         /*
3012          * Program table for at least 2 queues w/ SR-IOV so that VFs can
3013          * make full use of any rings they may have.  We will use the
3014          * PSRTYPE register to control how many rings we use within the PF.
3015          */
3016         if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3017                 rss_i = 2;
3018
3019         /* Fill out hash function seeds */
3020         for (i = 0; i < 10; i++)
3021                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
3022
3023         /* Fill out redirection table */
3024         for (i = 0, j = 0; i < 128; i++, j++) {
3025                 if (j == rss_i)
3026                         j = 0;
3027                 /* reta is a 4-entry sliding window; each byte is a queue
3028                  * index replicated in both nibbles, flushed every 4th pass */
3029                 reta = (reta << 8) | (j * 0x11);
3030                 if ((i & 3) == 3)
3031                         IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3032         }
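        /*
         * Worked example: if rss_i is 4, j cycles 0,1,2,3 and each byte
         * written is j * 0x11, so every RETA register ends up holding
         * 0x00112233 (the queue indices 0..3, nibble-replicated).
         */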
3033
3034         /* Disable indicating checksum in descriptor, enables RSS hash */
3035         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3036         rxcsum |= IXGBE_RXCSUM_PCSD;
3037         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3038
3039         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3040                 if (adapter->ring_feature[RING_F_RSS].mask)
3041                         mrqc = IXGBE_MRQC_RSSEN;
3042         } else {
3043                 u8 tcs = netdev_get_num_tc(adapter->netdev);
3044
3045                 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3046                         if (tcs > 4)
3047                                 mrqc = IXGBE_MRQC_VMDQRT8TCEN;  /* 8 TCs */
3048                         else if (tcs > 1)
3049                                 mrqc = IXGBE_MRQC_VMDQRT4TCEN;  /* 4 TCs */
3050                         else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3051                                 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3052                         else
3053                                 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3054                 } else {
3055                         if (tcs > 4)
3056                                 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3057                         else if (tcs > 1)
3058                                 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3059                         else
3060                                 mrqc = IXGBE_MRQC_RSSEN;
3061                 }
3062         }
3063
3064         /* Perform hash on these packet types */
3065         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3066                 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3067                 IXGBE_MRQC_RSS_FIELD_IPV6 |
3068                 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3069
3070         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3071                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3072         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3073                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3074
3075         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3076 }
3077
3078 /**
3079  * ixgbe_configure_rscctl - enable RSC for the indicated ring
3080  * @adapter:    address of board private structure
3081  * @index:      index of ring to set
3082  **/
3083 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3084                                    struct ixgbe_ring *ring)
3085 {
3086         struct ixgbe_hw *hw = &adapter->hw;
3087         u32 rscctrl;
3088         u8 reg_idx = ring->reg_idx;
3089
3090         if (!ring_is_rsc_enabled(ring))
3091                 return;
3092
3093         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3094         rscctrl |= IXGBE_RSCCTL_RSCEN;
3095         /*
3096          * we must limit the number of descriptors so that the
3097          * total size of max desc * buf_len is not greater than 65536
3098          * (e.g. 16 descriptors * 4 KB buffers = 64 KB exactly)
3099          */
3100         rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3101         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3102 }
3103
3104 #define IXGBE_MAX_RX_DESC_POLL 10
3105 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3106                                        struct ixgbe_ring *ring)
3107 {
3108         struct ixgbe_hw *hw = &adapter->hw;
3109         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3110         u32 rxdctl;
3111         u8 reg_idx = ring->reg_idx;
3112
3113         /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3114         if (hw->mac.type == ixgbe_mac_82598EB &&
3115             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3116                 return;
3117
3118         do {
3119                 usleep_range(1000, 2000);
3120                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3121         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3122
3123         if (!wait_loop) {
3124                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
3125                       reg_idx);
3126         }
3127 }
3128
3129 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3130                             struct ixgbe_ring *ring)
3131 {
3132         struct ixgbe_hw *hw = &adapter->hw;
3133         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3134         u32 rxdctl;
3135         u8 reg_idx = ring->reg_idx;
3136
3137         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3138         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3139
3140         /* write value back with RXDCTL.ENABLE bit cleared */
3141         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3142
3143         if (hw->mac.type == ixgbe_mac_82598EB &&
3144             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3145                 return;
3146
3147         /* the hardware may take up to 100us to really disable the rx queue */
3148         do {
3149                 udelay(10);
3150                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3151         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3152
3153         if (!wait_loop) {
3154                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within the polling period\n",
3155                       reg_idx);
3156         }
3157 }
3158
3159 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3160                              struct ixgbe_ring *ring)
3161 {
3162         struct ixgbe_hw *hw = &adapter->hw;
3163         u64 rdba = ring->dma;
3164         u32 rxdctl;
3165         u8 reg_idx = ring->reg_idx;
3166
3167         /* disable queue to avoid issues while updating state */
3168         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3169         ixgbe_disable_rx_queue(adapter, ring);
3170
3171         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3172         IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3173         IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3174                         ring->count * sizeof(union ixgbe_adv_rx_desc));
3175         IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3176         IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3177         ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
3178
3179         ixgbe_configure_srrctl(adapter, ring);
3180         ixgbe_configure_rscctl(adapter, ring);
3181
3182         if (hw->mac.type == ixgbe_mac_82598EB) {
3183                 /*
3184                  * enable cache-line-friendly hardware writes:
3185                  * PTHRESH=32 descriptors (half the internal cache); this
3186                  * also removes the ugly rx_no_buffer_count increments
3187                  * HTHRESH=4 descriptors (to minimize latency on fetch)
3188                  * WTHRESH=8 to burst writeback up to two cache lines
3189                  */
3190                 rxdctl &= ~0x3FFFFF;
3191                 rxdctl |=  0x080420;
3192         }
3193
3194         /* enable receive descriptor ring */
3195         rxdctl |= IXGBE_RXDCTL_ENABLE;
3196         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3197
3198         ixgbe_rx_desc_queue_enable(adapter, ring);
3199         ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3200 }
3201
3202 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3203 {
3204         struct ixgbe_hw *hw = &adapter->hw;
3205         int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3206         int p;
3207
3208         /* PSRTYPE must be initialized in non-82598 adapters */
3209         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3210                       IXGBE_PSRTYPE_UDPHDR |
3211                       IXGBE_PSRTYPE_IPV4HDR |
3212                       IXGBE_PSRTYPE_L2HDR |
3213                       IXGBE_PSRTYPE_IPV6HDR;
3214
3215         if (hw->mac.type == ixgbe_mac_82598EB)
3216                 return;
3217
3218         if (rss_i > 3)
3219                 psrtype |= 2 << 29;
3220         else if (rss_i > 1)
3221                 psrtype |= 1 << 29;
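        /*
         * The 2 << 29 and 1 << 29 values above appear to program the RQPL
         * field (bits 31:29 per the 82599 datasheet, an assumption here),
         * which encodes the per-pool RSS queue count as a power of two:
         * 2 selects 4 queues, 1 selects 2 queues.
         */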
3222
3223         for (p = 0; p < adapter->num_rx_pools; p++)
3224                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
3225                                 psrtype);
3226 }
3227
3228 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3229 {
3230         struct ixgbe_hw *hw = &adapter->hw;
3231         u32 reg_offset, vf_shift;
3232         u32 gcr_ext, vmdctl;
3233         int i;
3234
3235         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3236                 return;
3237
3238         vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3239         vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3240         vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3241         vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3242         vmdctl |= IXGBE_VT_CTL_REPLEN;
3243         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3244
3245         vf_shift = VMDQ_P(0) % 32;
3246         reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3247
3248         /* Enable only the PF's pool for Tx/Rx */
3249         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3250         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3251         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3252         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
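        /*
         * Worked example (assuming 32 VFs, so that the PF pool VMDQ_P(0)
         * is pool 32): vf_shift == 0 and reg_offset == 1, so VFRE(1) and
         * VFTE(1) are written with ~0 << 0 == 0xFFFFFFFF (the PF pool and
         * everything above it), while VFRE(0) and VFTE(0) get
         * reg_offset - 1 == 0; only the PF's half is enabled here.
         */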
3253         if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
3254                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3255
3256         /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3257         hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3258
3259         /*
3260          * Set up VF register offsets for selected VT Mode,
3261          * i.e. 32 or 64 VFs for SR-IOV
3262          */
3263         switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3264         case IXGBE_82599_VMDQ_8Q_MASK:
3265                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3266                 break;
3267         case IXGBE_82599_VMDQ_4Q_MASK:
3268                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3269                 break;
3270         default:
3271                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3272                 break;
3273         }
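        /*
         * In other words (assuming the usual 82599 carve-up of 128 hardware
         * queues): VT_MODE_16 means 16 pools x 8 queues, VT_MODE_32 means
         * 32 pools x 4 queues, and VT_MODE_64 means 64 pools x 2 queues.
         */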
3274
3275         IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3276
3278         /* Enable MAC Anti-Spoofing */
3279         hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3280                                           adapter->num_vfs);
3281         /* For VFs that have spoof checking turned off */
3282         for (i = 0; i < adapter->num_vfs; i++) {
3283                 if (!adapter->vfinfo[i].spoofchk_enabled)
3284                         ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3285         }
3286 }
3287
3288 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3289 {
3290         struct ixgbe_hw *hw = &adapter->hw;
3291         struct net_device *netdev = adapter->netdev;
3292         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3293         struct ixgbe_ring *rx_ring;
3294         int i;
3295         u32 mhadd, hlreg0;
3296
3297 #ifdef IXGBE_FCOE
3298         /* adjust max frame to be able to do baby jumbo for FCoE */
3299         if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3300             (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3301                 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3302
3303 #endif /* IXGBE_FCOE */
3304
3305         /* adjust max frame to be at least the size of a standard frame */
3306         if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3307                 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
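        /*
         * Example: a standard 1500-byte MTU yields max_frame = 1500 +
         * ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, which already equals
         * ETH_FRAME_LEN + ETH_FCS_LEN, so the clamp above is a no-op.
         */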
3308
3309         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3310         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3311                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3312                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3313
3314                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3315         }
3316
3317         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3318         /* always set jumbo enable since MHADD.MFS keeps the size locked at max_frame */
3319         hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3320         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3321
3322         /*
3323          * Set the RSC state of each Rx ring to match the
3324          * adapter-wide IXGBE_FLAG2_RSC_ENABLED flag
3325          */
3326         for (i = 0; i < adapter->num_rx_queues; i++) {
3327                 rx_ring = adapter->rx_ring[i];
3328                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3329                         set_ring_rsc_enabled(rx_ring);
3330                 else
3331                         clear_ring_rsc_enabled(rx_ring);
3332         }
3333 }
3334
3335 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3336 {
3337         struct ixgbe_hw *hw = &adapter->hw;
3338         u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3339
3340         switch (hw->mac.type) {
3341         case ixgbe_mac_82598EB:
3342                 /*
3343                  * For VMDq support of different descriptor types or
3344                  * buffer sizes through the use of multiple SRRCTL
3345                  * registers, RDRXCTL.MVMEN must be set to 1
3346                  *
3347                  * also, though the manual doesn't state it clearly, DCA
3348                  * hints will only use queue 0's tags unless this bit is
3349                  * set.  The only side effect of setting it is that SRRCTL
3350                  * must be fully programmed [0..15]
3351                  */
3352                 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3353                 break;
3354         case ixgbe_mac_82599EB:
3355         case ixgbe_mac_X540:
3356                 /* Disable RSC for ACK packets */
3357                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3358                    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3359                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3360                 /* hardware requires some bits to be set by default */
3361                 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3362                 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3363                 break;
3364         default:
3365                 /* We should do nothing since we don't know this hardware */
3366                 return;
3367         }
3368
3369         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3370 }
3371
3372 /**
3373  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3374  * @adapter: board private structure
3375  *
3376  * Configure the Rx unit of the MAC after a reset.
3377  **/
3378 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3379 {
3380         struct ixgbe_hw *hw = &adapter->hw;
3381         int i;
3382         u32 rxctrl;
3383
3384         /* disable receives while setting up the descriptors */
3385         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3386         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3387
3388         ixgbe_setup_psrtype(adapter);
3389         ixgbe_setup_rdrxctl(adapter);
3390
3391         /* Program registers for the distribution of queues */
3392         ixgbe_setup_mrqc(adapter);
3393
3394         /* set_rx_buffer_len must be called before ring initialization */
3395         ixgbe_set_rx_buffer_len(adapter);
3396
3397         /*
3398          * Setup the HW Rx Head and Tail Descriptor Pointers and
3399          * the Base and Length of the Rx Descriptor Ring
3400          */
3401         for (i = 0; i < adapter->num_rx_queues; i++)
3402                 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3403
3404         /* disable drop enable for 82598 parts */
3405         if (hw->mac.type == ixgbe_mac_82598EB)
3406                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3407
3408         /* enable all receives */
3409         rxctrl |= IXGBE_RXCTRL_RXEN;
3410         hw->mac.ops.enable_rx_dma(hw, rxctrl);
3411 }
3412
3413 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3414 {
3415         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3416         struct ixgbe_hw *hw = &adapter->hw;
3417
3418         /* add VID to filter table */
3419         hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true);
3420         set_bit(vid, adapter->active_vlans);
3421
3422         return 0;
3423 }
3424
3425 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3426 {
3427         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3428         struct ixgbe_hw *hw = &adapter->hw;
3429
3430         /* remove VID from filter table */
3431         hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false);
3432         clear_bit(vid, adapter->active_vlans);
3433
3434         return 0;
3435 }
3436
3437 /**
3438  * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3439  * @adapter: driver data
3440  */
3441 static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3442 {
3443         struct ixgbe_hw *hw = &adapter->hw;
3444         u32 vlnctrl;
3445
3446         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3447         vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3448         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3449 }
3450
3451 /**
3452  * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3453  * @adapter: driver data
3454  */
3455 static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3456 {
3457         struct ixgbe_hw *hw = &adapter->hw;
3458         u32 vlnctrl;
3459
3460         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3461         vlnctrl |= IXGBE_VLNCTRL_VFE;
3462         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3463         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3464 }
3465
3466 /**
3467  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3468  * @adapter: driver data
3469  */
3470 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3471 {
3472         struct ixgbe_hw *hw = &adapter->hw;
3473         u32 vlnctrl;
3474         int i, j;
3475
3476         switch (hw->mac.type) {
3477         case ixgbe_mac_82598EB:
3478                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3479                 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3480                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3481                 break;
3482         case ixgbe_mac_82599EB:
3483         case ixgbe_mac_X540:
3484                 for (i = 0; i < adapter->num_rx_queues; i++) {
3485                         j = adapter->rx_ring[i]->reg_idx;
3486                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3487                         vlnctrl &= ~IXGBE_RXDCTL_VME;
3488                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3489                 }
3490                 break;
3491         default:
3492                 break;
3493         }
3494 }
3495
3496 /**
3497  * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3498  * @adapter: driver data
3499  */