drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <asm/uaccess.h>
64
65 #include "cxgb4.h"
66 #include "t4_regs.h"
67 #include "t4_msg.h"
68 #include "t4fw_api.h"
69 #include "l2t.h"
70
71 #define DRV_VERSION "1.3.0-ko"
72 #define DRV_DESC "Chelsio T4 Network Driver"
73
74 /*
75  * Max interrupt hold-off timer value in us.  Queues fall back to this value
76  * under extreme memory pressure so it's largish to give the system time to
77  * recover.
78  */
79 #define MAX_SGE_TIMERVAL 200U
80
81 enum {
82         /*
83          * Physical Function provisioning constants.
84          */
85         PFRES_NVI = 4,                  /* # of Virtual Interfaces */
86         PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
87         PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
88                                          */
89         PFRES_NEQ = 256,                /* # of egress queues */
90         PFRES_NIQ = 0,                  /* # of ingress queues */
91         PFRES_TC = 0,                   /* PCI-E traffic class */
92         PFRES_NEXACTF = 128,            /* # of exact MPS filters */
93
94         PFRES_R_CAPS = FW_CMD_CAP_PF,
95         PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97 #ifdef CONFIG_PCI_IOV
98         /*
99          * Virtual Function provisioning constants.  We need two extra Ingress
100          * Queues with Interrupt capability to serve as the VF's Firmware
101          * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102          * neither will have Free Lists associated with them.  For each
103          * Ethernet/Control Egress Queue and for each Free List, we need an
104          * Egress Context.
105          */
106         VFRES_NPORTS = 1,               /* # of "ports" per VF */
107         VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
108
109         VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
110         VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
111         VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
112         VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
113         VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
114         VFRES_TC = 0,                   /* PCI-E traffic class */
115         VFRES_NEXACTF = 16,             /* # of exact MPS filters */
116
117         VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118         VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
119 #endif
120 };
121
122 /*
123  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
124  * static and likely not to be useful in the long run.  We really need to
125  * implement some form of persistent configuration which the firmware
126  * controls.
127  */
128 static unsigned int pfvfres_pmask(struct adapter *adapter,
129                                   unsigned int pf, unsigned int vf)
130 {
131         unsigned int portn, portvec;
132
133         /*
134          * Give PF's access to all of the ports.
135          */
136         if (vf == 0)
137                 return FW_PFVF_CMD_PMASK_MASK;
138
139         /*
140          * For VFs, we'll assign them access to the ports based purely on the
141          * PF.  We assign active ports in order, wrapping around if there are
142          * fewer active ports than PFs: e.g. active port[pf % nports].
143          * Unfortunately the adapter's port_info structs haven't been
144          * initialized yet so we have to compute this.
145          */
146         if (adapter->params.nports == 0)
147                 return 0;
148
149         portn = pf % adapter->params.nports;
150         portvec = adapter->params.portvec;
151         for (;;) {
152                 /*
153                  * Isolate the lowest set bit in the port vector.  If we're at
154                  * the port number that we want, return that as the pmask.
155                  * Otherwise mask that bit out of the port vector and
156                  * decrement our port number ...
157                  */
158                 unsigned int pmask = portvec ^ (portvec & (portvec-1));
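                /* e.g. portvec = 0x5: the first pass isolates pmask = 0x1;
                 * clearing that bit leaves 0x4 to be isolated on the next pass.
                 */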
159                 if (portn == 0)
160                         return pmask;
161                 portn--;
162                 portvec &= ~pmask;
163         }
164         /*NOTREACHED*/
165 }
166
167 enum {
168         MAX_TXQ_ENTRIES      = 16384,
169         MAX_CTRL_TXQ_ENTRIES = 1024,
170         MAX_RSPQ_ENTRIES     = 16384,
171         MAX_RX_BUFFERS       = 16384,
172         MIN_TXQ_ENTRIES      = 32,
173         MIN_CTRL_TXQ_ENTRIES = 32,
174         MIN_RSPQ_ENTRIES     = 128,
175         MIN_FL_ENTRIES       = 16
176 };
177
178 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
179                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
180                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
181
182 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
183
184 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
185         CH_DEVICE(0xa000, 0),  /* PE10K */
186         CH_DEVICE(0x4001, -1),
187         CH_DEVICE(0x4002, -1),
188         CH_DEVICE(0x4003, -1),
189         CH_DEVICE(0x4004, -1),
190         CH_DEVICE(0x4005, -1),
191         CH_DEVICE(0x4006, -1),
192         CH_DEVICE(0x4007, -1),
193         CH_DEVICE(0x4008, -1),
194         CH_DEVICE(0x4009, -1),
195         CH_DEVICE(0x400a, -1),
196         CH_DEVICE(0x4401, 4),
197         CH_DEVICE(0x4402, 4),
198         CH_DEVICE(0x4403, 4),
199         CH_DEVICE(0x4404, 4),
200         CH_DEVICE(0x4405, 4),
201         CH_DEVICE(0x4406, 4),
202         CH_DEVICE(0x4407, 4),
203         CH_DEVICE(0x4408, 4),
204         CH_DEVICE(0x4409, 4),
205         CH_DEVICE(0x440a, 4),
206         CH_DEVICE(0x440d, 4),
207         CH_DEVICE(0x440e, 4),
208         { 0, }
209 };
210
211 #define FW_FNAME "cxgb4/t4fw.bin"
212 #define FW_CFNAME "cxgb4/t4-config.txt"
213
214 MODULE_DESCRIPTION(DRV_DESC);
215 MODULE_AUTHOR("Chelsio Communications");
216 MODULE_LICENSE("Dual BSD/GPL");
217 MODULE_VERSION(DRV_VERSION);
218 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
219 MODULE_FIRMWARE(FW_FNAME);
220
221 /*
222  * Normally we're willing to become the firmware's Master PF but will be happy
223  * if another PF has already become the Master and initialized the adapter.
224  * Setting "force_init" will cause this driver to forcibly establish itself as
225  * the Master PF and initialize the adapter.
226  */
227 static uint force_init;
228
229 module_param(force_init, uint, 0644);
230 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
231
232 /*
233  * Normally if the firmware we connect to has Configuration File support, we
234  * use that and only fall back to the old Driver-based initialization if the
235  * Configuration File fails for some reason.  If force_old_init is set, then
236  * we'll always use the old Driver-based initialization sequence.
237  */
238 static uint force_old_init;
239
240 module_param(force_old_init, uint, 0644);
241 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
242
243 static int dflt_msg_enable = DFLT_MSG_ENABLE;
244
245 module_param(dflt_msg_enable, int, 0644);
246 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
247
248 /*
249  * The driver uses the best interrupt scheme available on a platform in the
250  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
251  * of these schemes the driver may consider as follows:
252  *
253  * msi = 2: choose from among all three options
254  * msi = 1: only consider MSI and INTx interrupts
255  * msi = 0: force INTx interrupts
256  */
257 static int msi = 2;
258
259 module_param(msi, int, 0644);
260 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
261
262 /*
263  * Queue interrupt hold-off timer values.  Queues default to the first of these
264  * upon creation.
265  */
266 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
267
268 module_param_array(intr_holdoff, uint, NULL, 0644);
269 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
270                  "0..4 in microseconds");
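/* An illustrative invocation (using the default values above):
 *   modprobe cxgb4 intr_holdoff=5,10,20,50,100
 */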
271
272 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
273
274 module_param_array(intr_cnt, uint, NULL, 0644);
275 MODULE_PARM_DESC(intr_cnt,
276                  "thresholds 1..3 for queue interrupt packet counters");
277
278 /*
279  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
280  * offset by 2 bytes in order to have the IP headers line up on 4-byte
281  * boundaries.  This is a requirement for many architectures which will throw
282  * a machine check fault if an attempt is made to access one of the 4-byte IP
283  * header fields on a non-4-byte boundary.  And it's a major performance issue
284  * even on some architectures which allow it like some implementations of the
285  * x86 ISA.  However, some architectures don't mind this and for some very
286  * edge-case performance sensitive applications (like forwarding large volumes
287  * of small packets), setting this DMA offset to 0 will decrease the number of
288  * PCI-E Bus transfers enough to measurably affect performance.
289  */
290 static int rx_dma_offset = 2;
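/*
 * (With the default offset of 2, the 14-byte Ethernet header occupies bytes
 * 2-15 of the buffer, so the IP header that follows begins at offset 16, a
 * 4-byte boundary.)
 */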
291
292 static bool vf_acls;
293
294 #ifdef CONFIG_PCI_IOV
295 module_param(vf_acls, bool, 0644);
296 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
297
298 static unsigned int num_vf[4];
299
300 module_param_array(num_vf, uint, NULL, 0644);
301 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
302 #endif
303
304 /*
305  * The filter TCAM has a fixed portion and a variable portion.  The fixed
306  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
307  * ports.  The variable portion is 36 bits which can include things like Exact
308  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
309  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
310  * far exceed the 36-bit budget for this "compressed" header portion of the
311  * filter.  Thus, we have a scarce resource which must be carefully managed.
312  *
313  * By default we set this up to mostly match the set of filter matching
314  * capabilities of T3 but with accommodations for some of T4's more
315  * interesting features:
316  *
317  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
318  *     [Inner] VLAN (17), Port (3), FCoE (1) }
319  */
320 enum {
321         TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
322         TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
323         TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
324 };
325
326 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
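/*
 * Note: the default selection above uses 1 + 3 + 8 + 17 + 3 + 1 = 33 of the
 * 36 available bits in the compressed filter tuple, so it stays within the
 * budget described above.
 */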
327
328 static struct dentry *cxgb4_debugfs_root;
329
330 static LIST_HEAD(adapter_list);
331 static DEFINE_MUTEX(uld_mutex);
332 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
333 static const char *uld_str[] = { "RDMA", "iSCSI" };
334
335 static void link_report(struct net_device *dev)
336 {
337         if (!netif_carrier_ok(dev))
338                 netdev_info(dev, "link down\n");
339         else {
340                 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
341
342                 const char *s = "10Mbps";
343                 const struct port_info *p = netdev_priv(dev);
344
345                 switch (p->link_cfg.speed) {
346                 case SPEED_10000:
347                         s = "10Gbps";
348                         break;
349                 case SPEED_1000:
350                         s = "1000Mbps";
351                         break;
352                 case SPEED_100:
353                         s = "100Mbps";
354                         break;
355                 }
356
357                 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
358                             fc[p->link_cfg.fc]);
359         }
360 }
361
362 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
363 {
364         struct net_device *dev = adapter->port[port_id];
365
366         /* Skip changes from disabled ports. */
367         if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
368                 if (link_stat)
369                         netif_carrier_on(dev);
370                 else
371                         netif_carrier_off(dev);
372
373                 link_report(dev);
374         }
375 }
376
377 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
378 {
379         static const char *mod_str[] = {
380                 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
381         };
382
383         const struct net_device *dev = adap->port[port_id];
384         const struct port_info *pi = netdev_priv(dev);
385
386         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
387                 netdev_info(dev, "port module unplugged\n");
388         else if (pi->mod_type < ARRAY_SIZE(mod_str))
389                 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
390 }
391
392 /*
393  * Configure the exact and hash address filters to handle a port's multicast
394  * and secondary unicast MAC addresses.
395  */
396 static int set_addr_filters(const struct net_device *dev, bool sleep)
397 {
398         u64 mhash = 0;
399         u64 uhash = 0;
400         bool free = true;
401         u16 filt_idx[7];
402         const u8 *addr[7];
403         int ret, naddr = 0;
404         const struct netdev_hw_addr *ha;
405         int uc_cnt = netdev_uc_count(dev);
406         int mc_cnt = netdev_mc_count(dev);
407         const struct port_info *pi = netdev_priv(dev);
408         unsigned int mb = pi->adapter->fn;
409
410         /* first do the secondary unicast addresses */
411         netdev_for_each_uc_addr(ha, dev) {
412                 addr[naddr++] = ha->addr;
413                 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
414                         ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
415                                         naddr, addr, filt_idx, &uhash, sleep);
416                         if (ret < 0)
417                                 return ret;
418
419                         free = false;
420                         naddr = 0;
421                 }
422         }
423
424         /* next set up the multicast addresses */
425         netdev_for_each_mc_addr(ha, dev) {
426                 addr[naddr++] = ha->addr;
427                 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
428                         ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
429                                         naddr, addr, filt_idx, &mhash, sleep);
430                         if (ret < 0)
431                                 return ret;
432
433                         free = false;
434                         naddr = 0;
435                 }
436         }
437
438         return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
439                                 uhash | mhash, sleep);
440 }
441
442 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
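/* i.e. one threshold unit appears to correspond to 64 doorbell FIFO entries
 * (10 * 64 == 640)
 */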
443 module_param(dbfifo_int_thresh, int, 0644);
444 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
445
446 /*
447  * usecs to sleep while draining the dbfifo
448  */
449 static int dbfifo_drain_delay = 1000;
450 module_param(dbfifo_drain_delay, int, 0644);
451 MODULE_PARM_DESC(dbfifo_drain_delay,
452                  "usecs to sleep while draining the dbfifo");
453
454 /*
455  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
456  * If @mtu is -1 it is left unchanged.
457  */
458 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
459 {
460         int ret;
461         struct port_info *pi = netdev_priv(dev);
462
463         ret = set_addr_filters(dev, sleep_ok);
464         if (ret == 0)
465                 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
466                                     (dev->flags & IFF_PROMISC) ? 1 : 0,
467                                     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
468                                     sleep_ok);
469         return ret;
470 }
471
472 static struct workqueue_struct *workq;
473
474 /**
475  *      link_start - enable a port
476  *      @dev: the port to enable
477  *
478  *      Performs the MAC and PHY actions needed to enable a port.
479  */
480 static int link_start(struct net_device *dev)
481 {
482         int ret;
483         struct port_info *pi = netdev_priv(dev);
484         unsigned int mb = pi->adapter->fn;
485
486         /*
487          * We do not set address filters and promiscuity here, the stack does
488          * that step explicitly.
489          */
490         ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
491                             !!(dev->features & NETIF_F_HW_VLAN_RX), true);
492         if (ret == 0) {
493                 ret = t4_change_mac(pi->adapter, mb, pi->viid,
494                                     pi->xact_addr_filt, dev->dev_addr, true,
495                                     true);
496                 if (ret >= 0) {
497                         pi->xact_addr_filt = ret;
498                         ret = 0;
499                 }
500         }
501         if (ret == 0)
502                 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
503                                     &pi->link_cfg);
504         if (ret == 0)
505                 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
506         return ret;
507 }
508
509 /*
510  * Response queue handler for the FW event queue.
511  */
512 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
513                           const struct pkt_gl *gl)
514 {
515         u8 opcode = ((const struct rss_header *)rsp)->opcode;
516
517         rsp++;                                          /* skip RSS header */
518         if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
519                 const struct cpl_sge_egr_update *p = (void *)rsp;
520                 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
521                 struct sge_txq *txq;
522
523                 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
524                 txq->restarts++;
525                 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
526                         struct sge_eth_txq *eq;
527
528                         eq = container_of(txq, struct sge_eth_txq, q);
529                         netif_tx_wake_queue(eq->txq);
530                 } else {
531                         struct sge_ofld_txq *oq;
532
533                         oq = container_of(txq, struct sge_ofld_txq, q);
534                         tasklet_schedule(&oq->qresume_tsk);
535                 }
536         } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
537                 const struct cpl_fw6_msg *p = (void *)rsp;
538
539                 if (p->type == 0)
540                         t4_handle_fw_rpl(q->adap, p->data);
541         } else if (opcode == CPL_L2T_WRITE_RPL) {
542                 const struct cpl_l2t_write_rpl *p = (void *)rsp;
543
544                 do_l2t_write_rpl(q->adap, p);
545         } else
546                 dev_err(q->adap->pdev_dev,
547                         "unexpected CPL %#x on FW event queue\n", opcode);
548         return 0;
549 }
550
551 /**
552  *      uldrx_handler - response queue handler for ULD queues
553  *      @q: the response queue that received the packet
554  *      @rsp: the response queue descriptor holding the offload message
555  *      @gl: the gather list of packet fragments
556  *
557  *      Deliver an ingress offload packet to a ULD.  All processing is done by
558  *      the ULD; we just maintain statistics.
559  */
560 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
561                          const struct pkt_gl *gl)
562 {
563         struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
564
565         if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
566                 rxq->stats.nomem++;
567                 return -1;
568         }
569         if (gl == NULL)
570                 rxq->stats.imm++;
571         else if (gl == CXGB4_MSG_AN)
572                 rxq->stats.an++;
573         else
574                 rxq->stats.pkts++;
575         return 0;
576 }
577
578 static void disable_msi(struct adapter *adapter)
579 {
580         if (adapter->flags & USING_MSIX) {
581                 pci_disable_msix(adapter->pdev);
582                 adapter->flags &= ~USING_MSIX;
583         } else if (adapter->flags & USING_MSI) {
584                 pci_disable_msi(adapter->pdev);
585                 adapter->flags &= ~USING_MSI;
586         }
587 }
588
589 /*
590  * Interrupt handler for non-data events used with MSI-X.
591  */
592 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
593 {
594         struct adapter *adap = cookie;
595
596         u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
597         if (v & PFSW) {
598                 adap->swintr = 1;
599                 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
600         }
601         t4_slow_intr_handler(adap);
602         return IRQ_HANDLED;
603 }
604
605 /*
606  * Name the MSI-X interrupts.
607  */
608 static void name_msix_vecs(struct adapter *adap)
609 {
610         int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
611
612         /* non-data interrupts */
613         snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
614
615         /* FW events */
616         snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
617                  adap->port[0]->name);
618
619         /* Ethernet queues */
620         for_each_port(adap, j) {
621                 struct net_device *d = adap->port[j];
622                 const struct port_info *pi = netdev_priv(d);
623
624                 for (i = 0; i < pi->nqsets; i++, msi_idx++)
625                         snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
626                                  d->name, i);
627         }
628
629         /* offload queues */
630         for_each_ofldrxq(&adap->sge, i)
631                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
632                          adap->port[0]->name, i);
633
634         for_each_rdmarxq(&adap->sge, i)
635                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
636                          adap->port[0]->name, i);
637 }
638
639 static int request_msix_queue_irqs(struct adapter *adap)
640 {
641         struct sge *s = &adap->sge;
642         int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
643
644         err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
645                           adap->msix_info[1].desc, &s->fw_evtq);
646         if (err)
647                 return err;
648
649         for_each_ethrxq(s, ethqidx) {
650                 err = request_irq(adap->msix_info[msi_index].vec,
651                                   t4_sge_intr_msix, 0,
652                                   adap->msix_info[msi_index].desc,
653                                   &s->ethrxq[ethqidx].rspq);
654                 if (err)
655                         goto unwind;
656                 msi_index++;
657         }
658         for_each_ofldrxq(s, ofldqidx) {
659                 err = request_irq(adap->msix_info[msi_index].vec,
660                                   t4_sge_intr_msix, 0,
661                                   adap->msix_info[msi_index].desc,
662                                   &s->ofldrxq[ofldqidx].rspq);
663                 if (err)
664                         goto unwind;
665                 msi_index++;
666         }
667         for_each_rdmarxq(s, rdmaqidx) {
668                 err = request_irq(adap->msix_info[msi_index].vec,
669                                   t4_sge_intr_msix, 0,
670                                   adap->msix_info[msi_index].desc,
671                                   &s->rdmarxq[rdmaqidx].rspq);
672                 if (err)
673                         goto unwind;
674                 msi_index++;
675         }
676         return 0;
677
678 unwind:
679         while (--rdmaqidx >= 0)
680                 free_irq(adap->msix_info[--msi_index].vec,
681                          &s->rdmarxq[rdmaqidx].rspq);
682         while (--ofldqidx >= 0)
683                 free_irq(adap->msix_info[--msi_index].vec,
684                          &s->ofldrxq[ofldqidx].rspq);
685         while (--ethqidx >= 0)
686                 free_irq(adap->msix_info[--msi_index].vec,
687                          &s->ethrxq[ethqidx].rspq);
688         free_irq(adap->msix_info[1].vec, &s->fw_evtq);
689         return err;
690 }
691
692 static void free_msix_queue_irqs(struct adapter *adap)
693 {
694         int i, msi_index = 2;
695         struct sge *s = &adap->sge;
696
697         free_irq(adap->msix_info[1].vec, &s->fw_evtq);
698         for_each_ethrxq(s, i)
699                 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
700         for_each_ofldrxq(s, i)
701                 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
702         for_each_rdmarxq(s, i)
703                 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
704 }
705
706 /**
707  *      write_rss - write the RSS table for a given port
708  *      @pi: the port
709  *      @queues: array of queue indices for RSS
710  *
711  *      Sets up the portion of the HW RSS table for the port's VI to distribute
712  *      packets to the Rx queues in @queues.
713  */
714 static int write_rss(const struct port_info *pi, const u16 *queues)
715 {
716         u16 *rss;
717         int i, err;
718         const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
719
720         rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
721         if (!rss)
722                 return -ENOMEM;
723
724         /* map the queue indices to queue ids */
725         for (i = 0; i < pi->rss_size; i++, queues++)
726                 rss[i] = q[*queues].rspq.abs_id;
727
728         err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
729                                   pi->rss_size, rss, pi->rss_size);
730         kfree(rss);
731         return err;
732 }
733
734 /**
735  *      setup_rss - configure RSS
736  *      @adap: the adapter
737  *
738  *      Sets up RSS for each port.
739  */
740 static int setup_rss(struct adapter *adap)
741 {
742         int i, err;
743
744         for_each_port(adap, i) {
745                 const struct port_info *pi = adap2pinfo(adap, i);
746
747                 err = write_rss(pi, pi->rss);
748                 if (err)
749                         return err;
750         }
751         return 0;
752 }
753
754 /*
755  * Return the channel of the ingress queue with the given qid.
756  */
757 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
758 {
759         qid -= p->ingr_start;
760         return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
761 }
762
763 /*
764  * Wait until all NAPI handlers are descheduled.
765  */
766 static void quiesce_rx(struct adapter *adap)
767 {
768         int i;
769
770         for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
771                 struct sge_rspq *q = adap->sge.ingr_map[i];
772
773                 if (q && q->handler)
774                         napi_disable(&q->napi);
775         }
776 }
777
778 /*
779  * Enable NAPI scheduling and interrupt generation for all Rx queues.
780  */
781 static void enable_rx(struct adapter *adap)
782 {
783         int i;
784
785         for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
786                 struct sge_rspq *q = adap->sge.ingr_map[i];
787
788                 if (!q)
789                         continue;
790                 if (q->handler)
791                         napi_enable(&q->napi);
792                 /* 0-increment GTS to start the timer and enable interrupts */
793                 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
794                              SEINTARM(q->intr_params) |
795                              INGRESSQID(q->cntxt_id));
796         }
797 }
798
799 /**
800  *      setup_sge_queues - configure SGE Tx/Rx/response queues
801  *      @adap: the adapter
802  *
803  *      Determines how many sets of SGE queues to use and initializes them.
804  *      We support multiple queue sets per port if we have MSI-X, otherwise
805  *      just one queue set per port.
806  */
807 static int setup_sge_queues(struct adapter *adap)
808 {
809         int err, msi_idx, i, j;
810         struct sge *s = &adap->sge;
811
812         bitmap_zero(s->starving_fl, MAX_EGRQ);
813         bitmap_zero(s->txq_maperr, MAX_EGRQ);
814
815         if (adap->flags & USING_MSIX)
816                 msi_idx = 1;         /* vector 0 is for non-queue interrupts */
817         else {
818                 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
819                                        NULL, NULL);
820                 if (err)
821                         return err;
822                 msi_idx = -((int)s->intrq.abs_id + 1);
823         }
824
825         err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
826                                msi_idx, NULL, fwevtq_handler);
827         if (err) {
828 freeout:        t4_free_sge_resources(adap);
829                 return err;
830         }
831
832         for_each_port(adap, i) {
833                 struct net_device *dev = adap->port[i];
834                 struct port_info *pi = netdev_priv(dev);
835                 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
836                 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
837
838                 for (j = 0; j < pi->nqsets; j++, q++) {
839                         if (msi_idx > 0)
840                                 msi_idx++;
841                         err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
842                                                msi_idx, &q->fl,
843                                                t4_ethrx_handler);
844                         if (err)
845                                 goto freeout;
846                         q->rspq.idx = j;
847                         memset(&q->stats, 0, sizeof(q->stats));
848                 }
849                 for (j = 0; j < pi->nqsets; j++, t++) {
850                         err = t4_sge_alloc_eth_txq(adap, t, dev,
851                                         netdev_get_tx_queue(dev, j),
852                                         s->fw_evtq.cntxt_id);
853                         if (err)
854                                 goto freeout;
855                 }
856         }
857
858         j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
859         for_each_ofldrxq(s, i) {
860                 struct sge_ofld_rxq *q = &s->ofldrxq[i];
861                 struct net_device *dev = adap->port[i / j];
862
863                 if (msi_idx > 0)
864                         msi_idx++;
865                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
866                                        &q->fl, uldrx_handler);
867                 if (err)
868                         goto freeout;
869                 memset(&q->stats, 0, sizeof(q->stats));
870                 s->ofld_rxq[i] = q->rspq.abs_id;
871                 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
872                                             s->fw_evtq.cntxt_id);
873                 if (err)
874                         goto freeout;
875         }
876
877         for_each_rdmarxq(s, i) {
878                 struct sge_ofld_rxq *q = &s->rdmarxq[i];
879
880                 if (msi_idx > 0)
881                         msi_idx++;
882                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
883                                        msi_idx, &q->fl, uldrx_handler);
884                 if (err)
885                         goto freeout;
886                 memset(&q->stats, 0, sizeof(q->stats));
887                 s->rdma_rxq[i] = q->rspq.abs_id;
888         }
889
890         for_each_port(adap, i) {
891                 /*
892                  * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
893                  * have RDMA queues, and that's the right value.
894                  */
895                 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
896                                             s->fw_evtq.cntxt_id,
897                                             s->rdmarxq[i].rspq.cntxt_id);
898                 if (err)
899                         goto freeout;
900         }
901
902         t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
903                      RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
904                      QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
905         return 0;
906 }
907
908 /*
909  * Returns 0 if new FW was successfully loaded, a positive errno if a load was
910  * started but failed, and a negative errno if flash load couldn't start.
911  */
912 static int upgrade_fw(struct adapter *adap)
913 {
914         int ret;
915         u32 vers;
916         const struct fw_hdr *hdr;
917         const struct firmware *fw;
918         struct device *dev = adap->pdev_dev;
919
920         ret = request_firmware(&fw, FW_FNAME, dev);
921         if (ret < 0) {
922                 dev_err(dev, "unable to load firmware image " FW_FNAME
923                         ", error %d\n", ret);
924                 return ret;
925         }
926
927         hdr = (const struct fw_hdr *)fw->data;
928         vers = ntohl(hdr->fw_ver);
929         if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
930                 ret = -EINVAL;              /* wrong major version, won't do */
931                 goto out;
932         }
933
934         /*
935          * If the flash FW is unusable or we found something newer, load it.
936          */
937         if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
938             vers > adap->params.fw_vers) {
939                 dev_info(dev, "upgrading firmware ...\n");
940                 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
941                                     /*force=*/false);
942                 if (!ret)
943                         dev_info(dev, "firmware successfully upgraded to "
944                                  FW_FNAME " (%d.%d.%d.%d)\n",
945                                  FW_HDR_FW_VER_MAJOR_GET(vers),
946                                  FW_HDR_FW_VER_MINOR_GET(vers),
947                                  FW_HDR_FW_VER_MICRO_GET(vers),
948                                  FW_HDR_FW_VER_BUILD_GET(vers));
949                 else
950                         dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
951         } else {
952                 /*
953                  * Tell our caller that we didn't upgrade the firmware.
954                  */
955                 ret = -EINVAL;
956         }
957
958 out:    release_firmware(fw);
959         return ret;
960 }
961
962 /*
963  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
964  * The allocated memory is cleared.
965  */
966 void *t4_alloc_mem(size_t size)
967 {
968         void *p = kzalloc(size, GFP_KERNEL);
969
970         if (!p)
971                 p = vzalloc(size);
972         return p;
973 }
974
975 /*
976  * Free memory allocated through t4_alloc_mem().
977  */
978 static void t4_free_mem(void *addr)
979 {
980         if (is_vmalloc_addr(addr))
981                 vfree(addr);
982         else
983                 kfree(addr);
984 }
985
986 static inline int is_offload(const struct adapter *adap)
987 {
988         return adap->params.offload;
989 }
990
991 /*
992  * Implementation of ethtool operations.
993  */
994
995 static u32 get_msglevel(struct net_device *dev)
996 {
997         return netdev2adap(dev)->msg_enable;
998 }
999
1000 static void set_msglevel(struct net_device *dev, u32 val)
1001 {
1002         netdev2adap(dev)->msg_enable = val;
1003 }
1004
1005 static char stats_strings[][ETH_GSTRING_LEN] = {
1006         "TxOctetsOK         ",
1007         "TxFramesOK         ",
1008         "TxBroadcastFrames  ",
1009         "TxMulticastFrames  ",
1010         "TxUnicastFrames    ",
1011         "TxErrorFrames      ",
1012
1013         "TxFrames64         ",
1014         "TxFrames65To127    ",
1015         "TxFrames128To255   ",
1016         "TxFrames256To511   ",
1017         "TxFrames512To1023  ",
1018         "TxFrames1024To1518 ",
1019         "TxFrames1519ToMax  ",
1020
1021         "TxFramesDropped    ",
1022         "TxPauseFrames      ",
1023         "TxPPP0Frames       ",
1024         "TxPPP1Frames       ",
1025         "TxPPP2Frames       ",
1026         "TxPPP3Frames       ",
1027         "TxPPP4Frames       ",
1028         "TxPPP5Frames       ",
1029         "TxPPP6Frames       ",
1030         "TxPPP7Frames       ",
1031
1032         "RxOctetsOK         ",
1033         "RxFramesOK         ",
1034         "RxBroadcastFrames  ",
1035         "RxMulticastFrames  ",
1036         "RxUnicastFrames    ",
1037
1038         "RxFramesTooLong    ",
1039         "RxJabberErrors     ",
1040         "RxFCSErrors        ",
1041         "RxLengthErrors     ",
1042         "RxSymbolErrors     ",
1043         "RxRuntFrames       ",
1044
1045         "RxFrames64         ",
1046         "RxFrames65To127    ",
1047         "RxFrames128To255   ",
1048         "RxFrames256To511   ",
1049         "RxFrames512To1023  ",
1050         "RxFrames1024To1518 ",
1051         "RxFrames1519ToMax  ",
1052
1053         "RxPauseFrames      ",
1054         "RxPPP0Frames       ",
1055         "RxPPP1Frames       ",
1056         "RxPPP2Frames       ",
1057         "RxPPP3Frames       ",
1058         "RxPPP4Frames       ",
1059         "RxPPP5Frames       ",
1060         "RxPPP6Frames       ",
1061         "RxPPP7Frames       ",
1062
1063         "RxBG0FramesDropped ",
1064         "RxBG1FramesDropped ",
1065         "RxBG2FramesDropped ",
1066         "RxBG3FramesDropped ",
1067         "RxBG0FramesTrunc   ",
1068         "RxBG1FramesTrunc   ",
1069         "RxBG2FramesTrunc   ",
1070         "RxBG3FramesTrunc   ",
1071
1072         "TSO                ",
1073         "TxCsumOffload      ",
1074         "RxCsumGood         ",
1075         "VLANextractions    ",
1076         "VLANinsertions     ",
1077         "GROpackets         ",
1078         "GROmerged          ",
1079 };
1080
1081 static int get_sset_count(struct net_device *dev, int sset)
1082 {
1083         switch (sset) {
1084         case ETH_SS_STATS:
1085                 return ARRAY_SIZE(stats_strings);
1086         default:
1087                 return -EOPNOTSUPP;
1088         }
1089 }
1090
1091 #define T4_REGMAP_SIZE (160 * 1024)
1092
1093 static int get_regs_len(struct net_device *dev)
1094 {
1095         return T4_REGMAP_SIZE;
1096 }
1097
1098 static int get_eeprom_len(struct net_device *dev)
1099 {
1100         return EEPROMSIZE;
1101 }
1102
1103 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1104 {
1105         struct adapter *adapter = netdev2adap(dev);
1106
1107         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1108         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1109         strlcpy(info->bus_info, pci_name(adapter->pdev),
1110                 sizeof(info->bus_info));
1111
1112         if (adapter->params.fw_vers)
1113                 snprintf(info->fw_version, sizeof(info->fw_version),
1114                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
1115                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1116                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1117                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1118                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1119                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1120                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1121                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1122                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1123 }
1124
1125 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1126 {
1127         if (stringset == ETH_SS_STATS)
1128                 memcpy(data, stats_strings, sizeof(stats_strings));
1129 }
1130
1131 /*
1132  * port stats maintained per queue of the port.  They should be in the same
1133  * order as in stats_strings above.
1134  */
1135 struct queue_port_stats {
1136         u64 tso;
1137         u64 tx_csum;
1138         u64 rx_csum;
1139         u64 vlan_ex;
1140         u64 vlan_ins;
1141         u64 gro_pkts;
1142         u64 gro_merged;
1143 };
1144
1145 static void collect_sge_port_stats(const struct adapter *adap,
1146                 const struct port_info *p, struct queue_port_stats *s)
1147 {
1148         int i;
1149         const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1150         const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1151
1152         memset(s, 0, sizeof(*s));
1153         for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1154                 s->tso += tx->tso;
1155                 s->tx_csum += tx->tx_cso;
1156                 s->rx_csum += rx->stats.rx_cso;
1157                 s->vlan_ex += rx->stats.vlan_ex;
1158                 s->vlan_ins += tx->vlan_ins;
1159                 s->gro_pkts += rx->stats.lro_pkts;
1160                 s->gro_merged += rx->stats.lro_merged;
1161         }
1162 }
1163
1164 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1165                       u64 *data)
1166 {
1167         struct port_info *pi = netdev_priv(dev);
1168         struct adapter *adapter = pi->adapter;
1169
1170         t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1171
1172         data += sizeof(struct port_stats) / sizeof(u64);
1173         collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1174 }
1175
1176 /*
1177  * Return a version number to identify the type of adapter.  The scheme is:
1178  * - bits 0..9: chip version
1179  * - bits 10..15: chip revision
1180  * - bits 16..23: register dump version
1181  */
1182 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1183 {
1184         return 4 | (ap->params.rev << 10) | (1 << 16);
1185 }
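/* e.g. mk_adap_vers() for a revision-1 chip returns 0x10404: chip 4,
 * revision 1, register dump version 1.
 */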
1186
1187 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1188                            unsigned int end)
1189 {
1190         u32 *p = buf + start;
1191
1192         for ( ; start <= end; start += sizeof(u32))
1193                 *p++ = t4_read_reg(ap, start);
1194 }
1195
1196 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1197                      void *buf)
1198 {
1199         static const unsigned int reg_ranges[] = {
1200                 0x1008, 0x1108,
1201                 0x1180, 0x11b4,
1202                 0x11fc, 0x123c,
1203                 0x1300, 0x173c,
1204                 0x1800, 0x18fc,
1205                 0x3000, 0x30d8,
1206                 0x30e0, 0x5924,
1207                 0x5960, 0x59d4,
1208                 0x5a00, 0x5af8,
1209                 0x6000, 0x6098,
1210                 0x6100, 0x6150,
1211                 0x6200, 0x6208,
1212                 0x6240, 0x6248,
1213                 0x6280, 0x6338,
1214                 0x6370, 0x638c,
1215                 0x6400, 0x643c,
1216                 0x6500, 0x6524,
1217                 0x6a00, 0x6a38,
1218                 0x6a60, 0x6a78,
1219                 0x6b00, 0x6b84,
1220                 0x6bf0, 0x6c84,
1221                 0x6cf0, 0x6d84,
1222                 0x6df0, 0x6e84,
1223                 0x6ef0, 0x6f84,
1224                 0x6ff0, 0x7084,
1225                 0x70f0, 0x7184,
1226                 0x71f0, 0x7284,
1227                 0x72f0, 0x7384,
1228                 0x73f0, 0x7450,
1229                 0x7500, 0x7530,
1230                 0x7600, 0x761c,
1231                 0x7680, 0x76cc,
1232                 0x7700, 0x7798,
1233                 0x77c0, 0x77fc,
1234                 0x7900, 0x79fc,
1235                 0x7b00, 0x7c38,
1236                 0x7d00, 0x7efc,
1237                 0x8dc0, 0x8e1c,
1238                 0x8e30, 0x8e78,
1239                 0x8ea0, 0x8f6c,
1240                 0x8fc0, 0x9074,
1241                 0x90fc, 0x90fc,
1242                 0x9400, 0x9458,
1243                 0x9600, 0x96bc,
1244                 0x9800, 0x9808,
1245                 0x9820, 0x983c,
1246                 0x9850, 0x9864,
1247                 0x9c00, 0x9c6c,
1248                 0x9c80, 0x9cec,
1249                 0x9d00, 0x9d6c,
1250                 0x9d80, 0x9dec,
1251                 0x9e00, 0x9e6c,
1252                 0x9e80, 0x9eec,
1253                 0x9f00, 0x9f6c,
1254                 0x9f80, 0x9fec,
1255                 0xd004, 0xd03c,
1256                 0xdfc0, 0xdfe0,
1257                 0xe000, 0xea7c,
1258                 0xf000, 0x11190,
1259                 0x19040, 0x1906c,
1260                 0x19078, 0x19080,
1261                 0x1908c, 0x19124,
1262                 0x19150, 0x191b0,
1263                 0x191d0, 0x191e8,
1264                 0x19238, 0x1924c,
1265                 0x193f8, 0x19474,
1266                 0x19490, 0x194f8,
1267                 0x19800, 0x19f30,
1268                 0x1a000, 0x1a06c,
1269                 0x1a0b0, 0x1a120,
1270                 0x1a128, 0x1a138,
1271                 0x1a190, 0x1a1c4,
1272                 0x1a1fc, 0x1a1fc,
1273                 0x1e040, 0x1e04c,
1274                 0x1e284, 0x1e28c,
1275                 0x1e2c0, 0x1e2c0,
1276                 0x1e2e0, 0x1e2e0,
1277                 0x1e300, 0x1e384,
1278                 0x1e3c0, 0x1e3c8,
1279                 0x1e440, 0x1e44c,
1280                 0x1e684, 0x1e68c,
1281                 0x1e6c0, 0x1e6c0,
1282                 0x1e6e0, 0x1e6e0,
1283                 0x1e700, 0x1e784,
1284                 0x1e7c0, 0x1e7c8,
1285                 0x1e840, 0x1e84c,
1286                 0x1ea84, 0x1ea8c,
1287                 0x1eac0, 0x1eac0,
1288                 0x1eae0, 0x1eae0,
1289                 0x1eb00, 0x1eb84,
1290                 0x1ebc0, 0x1ebc8,
1291                 0x1ec40, 0x1ec4c,
1292                 0x1ee84, 0x1ee8c,
1293                 0x1eec0, 0x1eec0,
1294                 0x1eee0, 0x1eee0,
1295                 0x1ef00, 0x1ef84,
1296                 0x1efc0, 0x1efc8,
1297                 0x1f040, 0x1f04c,
1298                 0x1f284, 0x1f28c,
1299                 0x1f2c0, 0x1f2c0,
1300                 0x1f2e0, 0x1f2e0,
1301                 0x1f300, 0x1f384,
1302                 0x1f3c0, 0x1f3c8,
1303                 0x1f440, 0x1f44c,
1304                 0x1f684, 0x1f68c,
1305                 0x1f6c0, 0x1f6c0,
1306                 0x1f6e0, 0x1f6e0,
1307                 0x1f700, 0x1f784,
1308                 0x1f7c0, 0x1f7c8,
1309                 0x1f840, 0x1f84c,
1310                 0x1fa84, 0x1fa8c,
1311                 0x1fac0, 0x1fac0,
1312                 0x1fae0, 0x1fae0,
1313                 0x1fb00, 0x1fb84,
1314                 0x1fbc0, 0x1fbc8,
1315                 0x1fc40, 0x1fc4c,
1316                 0x1fe84, 0x1fe8c,
1317                 0x1fec0, 0x1fec0,
1318                 0x1fee0, 0x1fee0,
1319                 0x1ff00, 0x1ff84,
1320                 0x1ffc0, 0x1ffc8,
1321                 0x20000, 0x2002c,
1322                 0x20100, 0x2013c,
1323                 0x20190, 0x201c8,
1324                 0x20200, 0x20318,
1325                 0x20400, 0x20528,
1326                 0x20540, 0x20614,
1327                 0x21000, 0x21040,
1328                 0x2104c, 0x21060,
1329                 0x210c0, 0x210ec,
1330                 0x21200, 0x21268,
1331                 0x21270, 0x21284,
1332                 0x212fc, 0x21388,
1333                 0x21400, 0x21404,
1334                 0x21500, 0x21518,
1335                 0x2152c, 0x2153c,
1336                 0x21550, 0x21554,
1337                 0x21600, 0x21600,
1338                 0x21608, 0x21628,
1339                 0x21630, 0x2163c,
1340                 0x21700, 0x2171c,
1341                 0x21780, 0x2178c,
1342                 0x21800, 0x21c38,
1343                 0x21c80, 0x21d7c,
1344                 0x21e00, 0x21e04,
1345                 0x22000, 0x2202c,
1346                 0x22100, 0x2213c,
1347                 0x22190, 0x221c8,
1348                 0x22200, 0x22318,
1349                 0x22400, 0x22528,
1350                 0x22540, 0x22614,
1351                 0x23000, 0x23040,
1352                 0x2304c, 0x23060,
1353                 0x230c0, 0x230ec,
1354                 0x23200, 0x23268,
1355                 0x23270, 0x23284,
1356                 0x232fc, 0x23388,
1357                 0x23400, 0x23404,
1358                 0x23500, 0x23518,
1359                 0x2352c, 0x2353c,
1360                 0x23550, 0x23554,
1361                 0x23600, 0x23600,
1362                 0x23608, 0x23628,
1363                 0x23630, 0x2363c,
1364                 0x23700, 0x2371c,
1365                 0x23780, 0x2378c,
1366                 0x23800, 0x23c38,
1367                 0x23c80, 0x23d7c,
1368                 0x23e00, 0x23e04,
1369                 0x24000, 0x2402c,
1370                 0x24100, 0x2413c,
1371                 0x24190, 0x241c8,
1372                 0x24200, 0x24318,
1373                 0x24400, 0x24528,
1374                 0x24540, 0x24614,
1375                 0x25000, 0x25040,
1376                 0x2504c, 0x25060,
1377                 0x250c0, 0x250ec,
1378                 0x25200, 0x25268,
1379                 0x25270, 0x25284,
1380                 0x252fc, 0x25388,
1381                 0x25400, 0x25404,
1382                 0x25500, 0x25518,
1383                 0x2552c, 0x2553c,
1384                 0x25550, 0x25554,
1385                 0x25600, 0x25600,
1386                 0x25608, 0x25628,
1387                 0x25630, 0x2563c,
1388                 0x25700, 0x2571c,
1389                 0x25780, 0x2578c,
1390                 0x25800, 0x25c38,
1391                 0x25c80, 0x25d7c,
1392                 0x25e00, 0x25e04,
1393                 0x26000, 0x2602c,
1394                 0x26100, 0x2613c,
1395                 0x26190, 0x261c8,
1396                 0x26200, 0x26318,
1397                 0x26400, 0x26528,
1398                 0x26540, 0x26614,
1399                 0x27000, 0x27040,
1400                 0x2704c, 0x27060,
1401                 0x270c0, 0x270ec,
1402                 0x27200, 0x27268,
1403                 0x27270, 0x27284,
1404                 0x272fc, 0x27388,
1405                 0x27400, 0x27404,
1406                 0x27500, 0x27518,
1407                 0x2752c, 0x2753c,
1408                 0x27550, 0x27554,
1409                 0x27600, 0x27600,
1410                 0x27608, 0x27628,
1411                 0x27630, 0x2763c,
1412                 0x27700, 0x2771c,
1413                 0x27780, 0x2778c,
1414                 0x27800, 0x27c38,
1415                 0x27c80, 0x27d7c,
1416                 0x27e00, 0x27e04
1417         };
1418
1419         int i;
1420         struct adapter *ap = netdev2adap(dev);
1421
1422         regs->version = mk_adap_vers(ap);
1423
1424         memset(buf, 0, T4_REGMAP_SIZE);
1425         for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1426                 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1427 }
1428
1429 static int restart_autoneg(struct net_device *dev)
1430 {
1431         struct port_info *p = netdev_priv(dev);
1432
1433         if (!netif_running(dev))
1434                 return -EAGAIN;
1435         if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1436                 return -EINVAL;
1437         t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1438         return 0;
1439 }
1440
1441 static int identify_port(struct net_device *dev,
1442                          enum ethtool_phys_id_state state)
1443 {
1444         unsigned int val;
1445         struct adapter *adap = netdev2adap(dev);
1446
1447         if (state == ETHTOOL_ID_ACTIVE)
1448                 val = 0xffff;
1449         else if (state == ETHTOOL_ID_INACTIVE)
1450                 val = 0;
1451         else
1452                 return -EINVAL;
1453
1454         return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1455 }
1456
1457 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1458 {
1459         unsigned int v = 0;
1460
1461         if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1462             type == FW_PORT_TYPE_BT_XAUI) {
1463                 v |= SUPPORTED_TP;
1464                 if (caps & FW_PORT_CAP_SPEED_100M)
1465                         v |= SUPPORTED_100baseT_Full;
1466                 if (caps & FW_PORT_CAP_SPEED_1G)
1467                         v |= SUPPORTED_1000baseT_Full;
1468                 if (caps & FW_PORT_CAP_SPEED_10G)
1469                         v |= SUPPORTED_10000baseT_Full;
1470         } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1471                 v |= SUPPORTED_Backplane;
1472                 if (caps & FW_PORT_CAP_SPEED_1G)
1473                         v |= SUPPORTED_1000baseKX_Full;
1474                 if (caps & FW_PORT_CAP_SPEED_10G)
1475                         v |= SUPPORTED_10000baseKX4_Full;
1476         } else if (type == FW_PORT_TYPE_KR)
1477                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1478         else if (type == FW_PORT_TYPE_BP_AP)
1479                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1480                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1481         else if (type == FW_PORT_TYPE_BP4_AP)
1482                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1483                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1484                      SUPPORTED_10000baseKX4_Full;
1485         else if (type == FW_PORT_TYPE_FIBER_XFI ||
1486                  type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1487                 v |= SUPPORTED_FIBRE;
1488
1489         if (caps & FW_PORT_CAP_ANEG)
1490                 v |= SUPPORTED_Autoneg;
1491         return v;
1492 }
1493
1494 static unsigned int to_fw_linkcaps(unsigned int caps)
1495 {
1496         unsigned int v = 0;
1497
1498         if (caps & ADVERTISED_100baseT_Full)
1499                 v |= FW_PORT_CAP_SPEED_100M;
1500         if (caps & ADVERTISED_1000baseT_Full)
1501                 v |= FW_PORT_CAP_SPEED_1G;
1502         if (caps & ADVERTISED_10000baseT_Full)
1503                 v |= FW_PORT_CAP_SPEED_10G;
1504         return v;
1505 }
1506
1507 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1508 {
1509         const struct port_info *p = netdev_priv(dev);
1510
1511         if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1512             p->port_type == FW_PORT_TYPE_BT_XFI ||
1513             p->port_type == FW_PORT_TYPE_BT_XAUI)
1514                 cmd->port = PORT_TP;
1515         else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1516                  p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1517                 cmd->port = PORT_FIBRE;
1518         else if (p->port_type == FW_PORT_TYPE_SFP) {
1519                 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1520                     p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1521                         cmd->port = PORT_DA;
1522                 else
1523                         cmd->port = PORT_FIBRE;
1524         } else
1525                 cmd->port = PORT_OTHER;
1526
1527         if (p->mdio_addr >= 0) {
1528                 cmd->phy_address = p->mdio_addr;
1529                 cmd->transceiver = XCVR_EXTERNAL;
1530                 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1531                         MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1532         } else {
1533                 cmd->phy_address = 0;  /* not really, but no better option */
1534                 cmd->transceiver = XCVR_INTERNAL;
1535                 cmd->mdio_support = 0;
1536         }
1537
1538         cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1539         cmd->advertising = from_fw_linkcaps(p->port_type,
1540                                             p->link_cfg.advertising);
1541         ethtool_cmd_speed_set(cmd,
1542                               netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1543         cmd->duplex = DUPLEX_FULL;
1544         cmd->autoneg = p->link_cfg.autoneg;
1545         cmd->maxtxpkt = 0;
1546         cmd->maxrxpkt = 0;
1547         return 0;
1548 }
1549
1550 static unsigned int speed_to_caps(int speed)
1551 {
1552         if (speed == SPEED_100)
1553                 return FW_PORT_CAP_SPEED_100M;
1554         if (speed == SPEED_1000)
1555                 return FW_PORT_CAP_SPEED_1G;
1556         if (speed == SPEED_10000)
1557                 return FW_PORT_CAP_SPEED_10G;
1558         return 0;
1559 }
1560
1561 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1562 {
1563         unsigned int cap;
1564         struct port_info *p = netdev_priv(dev);
1565         struct link_config *lc = &p->link_cfg;
1566         u32 speed = ethtool_cmd_speed(cmd);
1567
1568         if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
1569                 return -EINVAL;
1570
1571         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1572                 /*
1573                  * PHY offers a single speed.  See if that's what's
1574                  * being requested.
1575                  */
1576                 if (cmd->autoneg == AUTONEG_DISABLE &&
1577                     (lc->supported & speed_to_caps(speed)))
1578                         return 0;
1579                 return -EINVAL;
1580         }
1581
1582         if (cmd->autoneg == AUTONEG_DISABLE) {
1583                 cap = speed_to_caps(speed);
1584
1585                 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1586                     (speed == SPEED_10000))
1587                         return -EINVAL;
1588                 lc->requested_speed = cap;
1589                 lc->advertising = 0;
1590         } else {
1591                 cap = to_fw_linkcaps(cmd->advertising);
1592                 if (!(lc->supported & cap))
1593                         return -EINVAL;
1594                 lc->requested_speed = 0;
1595                 lc->advertising = cap | FW_PORT_CAP_ANEG;
1596         }
1597         lc->autoneg = cmd->autoneg;
1598
1599         if (netif_running(dev))
1600                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1601                                      lc);
1602         return 0;
1603 }
1604
1605 static void get_pauseparam(struct net_device *dev,
1606                            struct ethtool_pauseparam *epause)
1607 {
1608         struct port_info *p = netdev_priv(dev);
1609
1610         epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1611         epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1612         epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1613 }
1614
1615 static int set_pauseparam(struct net_device *dev,
1616                           struct ethtool_pauseparam *epause)
1617 {
1618         struct port_info *p = netdev_priv(dev);
1619         struct link_config *lc = &p->link_cfg;
1620
1621         if (epause->autoneg == AUTONEG_DISABLE)
1622                 lc->requested_fc = 0;
1623         else if (lc->supported & FW_PORT_CAP_ANEG)
1624                 lc->requested_fc = PAUSE_AUTONEG;
1625         else
1626                 return -EINVAL;
1627
1628         if (epause->rx_pause)
1629                 lc->requested_fc |= PAUSE_RX;
1630         if (epause->tx_pause)
1631                 lc->requested_fc |= PAUSE_TX;
1632         if (netif_running(dev))
1633                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1634                                      lc);
1635         return 0;
1636 }
1637
1638 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1639 {
1640         const struct port_info *pi = netdev_priv(dev);
1641         const struct sge *s = &pi->adapter->sge;
1642
1643         e->rx_max_pending = MAX_RX_BUFFERS;
1644         e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1645         e->rx_jumbo_max_pending = 0;
1646         e->tx_max_pending = MAX_TXQ_ENTRIES;
1647
1648         e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1649         e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1650         e->rx_jumbo_pending = 0;
1651         e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1652 }
1653
1654 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1655 {
1656         int i;
1657         const struct port_info *pi = netdev_priv(dev);
1658         struct adapter *adapter = pi->adapter;
1659         struct sge *s = &adapter->sge;
1660
1661         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1662             e->tx_pending > MAX_TXQ_ENTRIES ||
1663             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1664             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1665             e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1666                 return -EINVAL;
1667
1668         if (adapter->flags & FULL_INIT_DONE)
1669                 return -EBUSY;
1670
1671         for (i = 0; i < pi->nqsets; ++i) {
1672                 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1673                 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1674                 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1675         }
1676         return 0;
1677 }
1678
1679 static int closest_timer(const struct sge *s, int time)
1680 {
1681         int i, delta, match = 0, min_delta = INT_MAX;
1682
1683         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1684                 delta = time - s->timer_val[i];
1685                 if (delta < 0)
1686                         delta = -delta;
1687                 if (delta < min_delta) {
1688                         min_delta = delta;
1689                         match = i;
1690                 }
1691         }
1692         return match;
1693 }
1694
1695 static int closest_thres(const struct sge *s, int thres)
1696 {
1697         int i, delta, match = 0, min_delta = INT_MAX;
1698
1699         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1700                 delta = thres - s->counter_val[i];
1701                 if (delta < 0)
1702                         delta = -delta;
1703                 if (delta < min_delta) {
1704                         min_delta = delta;
1705                         match = i;
1706                 }
1707         }
1708         return match;
1709 }
1710
1711 /*
1712  * Return a queue's interrupt hold-off time in us.  0 means no timer.
1713  */
1714 static unsigned int qtimer_val(const struct adapter *adap,
1715                                const struct sge_rspq *q)
1716 {
1717         unsigned int idx = q->intr_params >> 1;
1718
1719         return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1720 }
1721
1722 /**
1723  *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
1724  *      @adap: the adapter
1725  *      @q: the Rx queue
1726  *      @us: the hold-off time in us, or 0 to disable timer
1727  *      @cnt: the hold-off packet count, or 0 to disable counter
1728  *
1729  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1730  *      one of the two needs to be enabled for the queue to generate interrupts.
1731  */
1732 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1733                                unsigned int us, unsigned int cnt)
1734 {
1735         if ((us | cnt) == 0)
1736                 cnt = 1;
1737
1738         if (cnt) {
1739                 int err;
1740                 u32 v, new_idx;
1741
1742                 new_idx = closest_thres(&adap->sge, cnt);
1743                 if (q->desc && q->pktcnt_idx != new_idx) {
1744                         /* the queue has already been created, update it */
1745                         v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1746                             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1747                             FW_PARAMS_PARAM_YZ(q->cntxt_id);
1748                         err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1749                                             &new_idx);
1750                         if (err)
1751                                 return err;
1752                 }
1753                 q->pktcnt_idx = new_idx;
1754         }
1755
1756         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1757         q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1758         return 0;
1759 }
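/*
 * Worked example of the encoding above (illustrative): a request of
 * us = 50, cnt = 8 keeps the index of the closest sge.counter_val[]
 * entry in pktcnt_idx, encodes the index of the closest
 * sge.timer_val[] entry in intr_params so that qtimer_val() can
 * recover it as intr_params >> 1, and sets QINTR_CNT_EN to flag that
 * the packet counter is active.
 */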
1760
1761 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1762 {
1763         const struct port_info *pi = netdev_priv(dev);
1764         struct adapter *adap = pi->adapter;
1765
1766         return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1767                         c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1768 }
1769
1770 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1771 {
1772         const struct port_info *pi = netdev_priv(dev);
1773         const struct adapter *adap = pi->adapter;
1774         const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1775
1776         c->rx_coalesce_usecs = qtimer_val(adap, rq);
1777         c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1778                 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1779         return 0;
1780 }
1781
1782 /**
1783  *      eeprom_ptov - translate a physical EEPROM address to virtual
1784  *      @phys_addr: the physical EEPROM address
1785  *      @fn: the PCI function number
1786  *      @sz: size of function-specific area
1787  *
1788  *      Translate a physical EEPROM address to virtual.  The first 1K is
1789  *      accessed through virtual addresses starting at 31K, the function-specific
1790  *      area just below 31K, and the rest through virtual addresses starting at 0.
1791  *
1792  *      The mapping is as follows:
1793  *      [0..1K) -> [31K..32K)
1794  *      [1K..1K+A) -> [31K-A..31K)
1795  *      [1K+A..ES) -> [0..ES-A-1K)
1796  *
1797  *      where A = @fn * @sz, and ES = EEPROM size.
1798  */
1799 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1800 {
1801         fn *= sz;
1802         if (phys_addr < 1024)
1803                 return phys_addr + (31 << 10);
1804         if (phys_addr < 1024 + fn)
1805                 return 31744 - fn + phys_addr - 1024;
1806         if (phys_addr < EEPROMSIZE)
1807                 return phys_addr - 1024 - fn;
1808         return -EINVAL;
1809 }
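/*
 * Worked example of the mapping above (illustrative), writing A for
 * @fn * @sz: physical address 0 maps to virtual 31K, 1023 maps to
 * 32K - 1, 1024 maps to 31K - A, and 1024 + A maps to virtual 0.
 */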
1810
1811 /*
1812  * The next two routines implement eeprom read/write from physical addresses.
1813  */
1814 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1815 {
1816         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1817
1818         if (vaddr >= 0)
1819                 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1820         return vaddr < 0 ? vaddr : 0;
1821 }
1822
1823 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1824 {
1825         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1826
1827         if (vaddr >= 0)
1828                 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1829         return vaddr < 0 ? vaddr : 0;
1830 }
1831
1832 #define EEPROM_MAGIC 0x38E2F10C
1833
1834 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1835                       u8 *data)
1836 {
1837         int i, err = 0;
1838         struct adapter *adapter = netdev2adap(dev);
1839
1840         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1841         if (!buf)
1842                 return -ENOMEM;
1843
1844         e->magic = EEPROM_MAGIC;
1845         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1846                 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1847
1848         if (!err)
1849                 memcpy(data, buf + e->offset, e->len);
1850         kfree(buf);
1851         return err;
1852 }
1853
1854 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1855                       u8 *data)
1856 {
1857         u8 *buf;
1858         int err = 0;
1859         u32 aligned_offset, aligned_len, *p;
1860         struct adapter *adapter = netdev2adap(dev);
1861
1862         if (eeprom->magic != EEPROM_MAGIC)
1863                 return -EINVAL;
1864
1865         aligned_offset = eeprom->offset & ~3;
1866         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1867
1868         if (adapter->fn > 0) {
1869                 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1870
1871                 if (aligned_offset < start ||
1872                     aligned_offset + aligned_len > start + EEPROMPFSIZE)
1873                         return -EPERM;
1874         }
1875
1876         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1877                 /*
1878                  * RMW possibly needed for first or last words.
1879                  */
1880                 buf = kmalloc(aligned_len, GFP_KERNEL);
1881                 if (!buf)
1882                         return -ENOMEM;
1883                 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1884                 if (!err && aligned_len > 4)
1885                         err = eeprom_rd_phys(adapter,
1886                                              aligned_offset + aligned_len - 4,
1887                                              (u32 *)&buf[aligned_len - 4]);
1888                 if (err)
1889                         goto out;
1890                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1891         } else
1892                 buf = data;
1893
1894         err = t4_seeprom_wp(adapter, false);
1895         if (err)
1896                 goto out;
1897
1898         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1899                 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1900                 aligned_offset += 4;
1901         }
1902
1903         if (!err)
1904                 err = t4_seeprom_wp(adapter, true);
1905 out:
1906         if (buf != data)
1907                 kfree(buf);
1908         return err;
1909 }
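/*
 * Alignment example for the read-modify-write above (illustrative): a
 * request with offset = 5, len = 6 is widened to aligned_offset = 4,
 * aligned_len = 8; the two 32-bit words at physical addresses 4 and 8
 * are read back, the six payload bytes are merged in at buf + 1, and
 * the whole 8-byte span is then written out again.
 */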
1910
1911 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1912 {
1913         int ret;
1914         const struct firmware *fw;
1915         struct adapter *adap = netdev2adap(netdev);
1916
1917         ef->data[sizeof(ef->data) - 1] = '\0';
1918         ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1919         if (ret < 0)
1920                 return ret;
1921
1922         ret = t4_load_fw(adap, fw->data, fw->size);
1923         release_firmware(fw);
1924         if (!ret)
1925                 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1926         return ret;
1927 }
1928
1929 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1930 #define BCAST_CRC 0xa0ccc1a6
1931
1932 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1933 {
1934         wol->supported = WAKE_BCAST | WAKE_MAGIC;
1935         wol->wolopts = netdev2adap(dev)->wol;
1936         memset(&wol->sopass, 0, sizeof(wol->sopass));
1937 }
1938
1939 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1940 {
1941         int err = 0;
1942         struct port_info *pi = netdev_priv(dev);
1943
1944         if (wol->wolopts & ~WOL_SUPPORTED)
1945                 return -EINVAL;
1946         t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1947                             (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1948         if (wol->wolopts & WAKE_BCAST) {
1949                 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1950                                         ~0ULL, 0, false);
1951                 if (!err)
1952                         err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1953                                                 ~6ULL, ~0ULL, BCAST_CRC, true);
1954         } else
1955                 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1956         return err;
1957 }
1958
1959 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1960 {
1961         const struct port_info *pi = netdev_priv(dev);
1962         netdev_features_t changed = dev->features ^ features;
1963         int err;
1964
1965         if (!(changed & NETIF_F_HW_VLAN_RX))
1966                 return 0;
1967
1968         err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1969                             -1, -1, -1,
1970                             !!(features & NETIF_F_HW_VLAN_RX), true);
1971         if (unlikely(err))
1972                 dev->features = features ^ NETIF_F_HW_VLAN_RX;
1973         return err;
1974 }
1975
1976 static u32 get_rss_table_size(struct net_device *dev)
1977 {
1978         const struct port_info *pi = netdev_priv(dev);
1979
1980         return pi->rss_size;
1981 }
1982
1983 static int get_rss_table(struct net_device *dev, u32 *p)
1984 {
1985         const struct port_info *pi = netdev_priv(dev);
1986         unsigned int n = pi->rss_size;
1987
1988         while (n--)
1989                 p[n] = pi->rss[n];
1990         return 0;
1991 }
1992
1993 static int set_rss_table(struct net_device *dev, const u32 *p)
1994 {
1995         unsigned int i;
1996         struct port_info *pi = netdev_priv(dev);
1997
1998         for (i = 0; i < pi->rss_size; i++)
1999                 pi->rss[i] = p[i];
2000         if (pi->adapter->flags & FULL_INIT_DONE)
2001                 return write_rss(pi, pi->rss);
2002         return 0;
2003 }
2004
2005 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2006                      u32 *rules)
2007 {
2008         const struct port_info *pi = netdev_priv(dev);
2009
2010         switch (info->cmd) {
2011         case ETHTOOL_GRXFH: {
2012                 unsigned int v = pi->rss_mode;
2013
2014                 info->data = 0;
2015                 switch (info->flow_type) {
2016                 case TCP_V4_FLOW:
2017                         if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2018                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2019                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2020                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2021                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2022                         break;
2023                 case UDP_V4_FLOW:
2024                         if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2025                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2026                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2027                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2028                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2029                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2030                         break;
2031                 case SCTP_V4_FLOW:
2032                 case AH_ESP_V4_FLOW:
2033                 case IPV4_FLOW:
2034                         if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2035                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2036                         break;
2037                 case TCP_V6_FLOW:
2038                         if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2039                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2040                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2041                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2042                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2043                         break;
2044                 case UDP_V6_FLOW:
2045                         if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2046                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2047                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2048                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2049                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2050                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2051                         break;
2052                 case SCTP_V6_FLOW:
2053                 case AH_ESP_V6_FLOW:
2054                 case IPV6_FLOW:
2055                         if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2056                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2057                         break;
2058                 }
2059                 return 0;
2060         }
2061         case ETHTOOL_GRXRINGS:
2062                 info->data = pi->nqsets;
2063                 return 0;
2064         }
2065         return -EOPNOTSUPP;
2066 }
2067
2068 static const struct ethtool_ops cxgb_ethtool_ops = {
2069         .get_settings      = get_settings,
2070         .set_settings      = set_settings,
2071         .get_drvinfo       = get_drvinfo,
2072         .get_msglevel      = get_msglevel,
2073         .set_msglevel      = set_msglevel,
2074         .get_ringparam     = get_sge_param,
2075         .set_ringparam     = set_sge_param,
2076         .get_coalesce      = get_coalesce,
2077         .set_coalesce      = set_coalesce,
2078         .get_eeprom_len    = get_eeprom_len,
2079         .get_eeprom        = get_eeprom,
2080         .set_eeprom        = set_eeprom,
2081         .get_pauseparam    = get_pauseparam,
2082         .set_pauseparam    = set_pauseparam,
2083         .get_link          = ethtool_op_get_link,
2084         .get_strings       = get_strings,
2085         .set_phys_id       = identify_port,
2086         .nway_reset        = restart_autoneg,
2087         .get_sset_count    = get_sset_count,
2088         .get_ethtool_stats = get_stats,
2089         .get_regs_len      = get_regs_len,
2090         .get_regs          = get_regs,
2091         .get_wol           = get_wol,
2092         .set_wol           = set_wol,
2093         .get_rxnfc         = get_rxnfc,
2094         .get_rxfh_indir_size = get_rss_table_size,
2095         .get_rxfh_indir    = get_rss_table,
2096         .set_rxfh_indir    = set_rss_table,
2097         .flash_device      = set_flash,
2098 };
2099
2100 /*
2101  * debugfs support
2102  */
2103 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2104                         loff_t *ppos)
2105 {
2106         loff_t pos = *ppos;
2107         loff_t avail = file->f_path.dentry->d_inode->i_size;
2108         unsigned int mem = (uintptr_t)file->private_data & 3;
2109         struct adapter *adap = file->private_data - mem;
2110
2111         if (pos < 0)
2112                 return -EINVAL;
2113         if (pos >= avail)
2114                 return 0;
2115         if (count > avail - pos)
2116                 count = avail - pos;
2117
2118         while (count) {
2119                 size_t len;
2120                 int ret, ofst;
2121                 __be32 data[16];
2122
2123                 if (mem == MEM_MC)
2124                         ret = t4_mc_read(adap, pos, data, NULL);
2125                 else
2126                         ret = t4_edc_read(adap, mem, pos, data, NULL);
2127                 if (ret)
2128                         return ret;
2129
2130                 ofst = pos % sizeof(data);
2131                 len = min(count, sizeof(data) - ofst);
2132                 if (copy_to_user(buf, (u8 *)data + ofst, len))
2133                         return -EFAULT;
2134
2135                 buf += len;
2136                 pos += len;
2137                 count -= len;
2138         }
2139         count = pos - *ppos;
2140         *ppos = pos;
2141         return count;
2142 }
2143
2144 static const struct file_operations mem_debugfs_fops = {
2145         .owner   = THIS_MODULE,
2146         .open    = simple_open,
2147         .read    = mem_read,
2148         .llseek  = default_llseek,
2149 };
2150
2151 static void add_debugfs_mem(struct adapter *adap, const char *name,
2152                             unsigned int idx, unsigned int size_mb)
2153 {
2154         struct dentry *de;
2155
2156         de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2157                                  (void *)adap + idx, &mem_debugfs_fops);
2158         if (de && de->d_inode)
2159                 de->d_inode->i_size = size_mb << 20;
2160 }
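/*
 * add_debugfs_mem() and mem_read() above share a small pointer-tagging
 * trick: the file's private_data is the adapter pointer with the memory
 * index (EDC0/EDC1/MC) stashed in its low two bits, which mem_read()
 * recovers with "& 3" before subtracting it back off to get the adapter.
 */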
2161
2162 static int setup_debugfs(struct adapter *adap)
2163 {
2164         int i;
2165
2166         if (IS_ERR_OR_NULL(adap->debugfs_root))
2167                 return -1;
2168
2169         i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2170         if (i & EDRAM0_ENABLE)
2171                 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2172         if (i & EDRAM1_ENABLE)
2173                 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2174         if (i & EXT_MEM_ENABLE)
2175                 add_debugfs_mem(adap, "mc", MEM_MC,
2176                         EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2177         if (adap->l2t)
2178                 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2179                                     &t4_l2t_fops);
2180         return 0;
2181 }
2182
2183 /*
2184  * upper-layer driver support
2185  */
2186
2187 /*
2188  * Allocate an active-open TID and set its data to the supplied value.
2189  */
2190 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2191 {
2192         int atid = -1;
2193
2194         spin_lock_bh(&t->atid_lock);
2195         if (t->afree) {
2196                 union aopen_entry *p = t->afree;
2197
2198                 atid = p - t->atid_tab;
2199                 t->afree = p->next;
2200                 p->data = data;
2201                 t->atids_in_use++;
2202         }
2203         spin_unlock_bh(&t->atid_lock);
2204         return atid;
2205 }
2206 EXPORT_SYMBOL(cxgb4_alloc_atid);
2207
2208 /*
2209  * Release an active-open TID.
2210  */
2211 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2212 {
2213         union aopen_entry *p = &t->atid_tab[atid];
2214
2215         spin_lock_bh(&t->atid_lock);
2216         p->next = t->afree;
2217         t->afree = p;
2218         t->atids_in_use--;
2219         spin_unlock_bh(&t->atid_lock);
2220 }
2221 EXPORT_SYMBOL(cxgb4_free_atid);
2222
2223 /*
2224  * Allocate a server TID and set its data to the supplied value.
2225  */
2226 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2227 {
2228         int stid;
2229
2230         spin_lock_bh(&t->stid_lock);
2231         if (family == PF_INET) {
2232                 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2233                 if (stid < t->nstids)
2234                         __set_bit(stid, t->stid_bmap);
2235                 else
2236                         stid = -1;
2237         } else {
2238                 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2239                 if (stid < 0)
2240                         stid = -1;
2241         }
2242         if (stid >= 0) {
2243                 t->stid_tab[stid].data = data;
2244                 stid += t->stid_base;
2245                 t->stids_in_use++;
2246         }
2247         spin_unlock_bh(&t->stid_lock);
2248         return stid;
2249 }
2250 EXPORT_SYMBOL(cxgb4_alloc_stid);
2251
2252 /*
2253  * Release a server TID.
2254  */
2255 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2256 {
2257         stid -= t->stid_base;
2258         spin_lock_bh(&t->stid_lock);
2259         if (family == PF_INET)
2260                 __clear_bit(stid, t->stid_bmap);
2261         else
2262                 bitmap_release_region(t->stid_bmap, stid, 2);
2263         t->stid_tab[stid].data = NULL;
2264         t->stids_in_use--;
2265         spin_unlock_bh(&t->stid_lock);
2266 }
2267 EXPORT_SYMBOL(cxgb4_free_stid);
2268
2269 /*
2270  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
2271  */
2272 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2273                            unsigned int tid)
2274 {
2275         struct cpl_tid_release *req;
2276
2277         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2278         req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2279         INIT_TP_WR(req, tid);
2280         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2281 }
2282
2283 /*
2284  * Queue a TID release request and, if necessary, schedule a work queue to
2285  * process it.
2286  */
2287 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2288                                     unsigned int tid)
2289 {
2290         void **p = &t->tid_tab[tid];
2291         struct adapter *adap = container_of(t, struct adapter, tids);
2292
2293         spin_lock_bh(&adap->tid_release_lock);
2294         *p = adap->tid_release_head;
2295         /* Low 2 bits encode the Tx channel number */
2296         adap->tid_release_head = (void **)((uintptr_t)p | chan);
2297         if (!adap->tid_release_task_busy) {
2298                 adap->tid_release_task_busy = true;
2299                 queue_work(workq, &adap->tid_release_task);
2300         }
2301         spin_unlock_bh(&adap->tid_release_lock);
2302 }
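/*
 * The deferred-release list built here is threaded through the tid_tab
 * slots themselves: each pending entry points at the previous list head,
 * with the Tx channel kept in the low two bits of the pointer, and
 * process_tid_release_list() below strips that tag off again to recover
 * both the slot and the channel.
 */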
2303
2304 /*
2305  * Process the list of pending TID release requests.
2306  */
2307 static void process_tid_release_list(struct work_struct *work)
2308 {
2309         struct sk_buff *skb;
2310         struct adapter *adap;
2311
2312         adap = container_of(work, struct adapter, tid_release_task);
2313
2314         spin_lock_bh(&adap->tid_release_lock);
2315         while (adap->tid_release_head) {
2316                 void **p = adap->tid_release_head;
2317                 unsigned int chan = (uintptr_t)p & 3;
2318                 p = (void *)p - chan;
2319
2320                 adap->tid_release_head = *p;
2321                 *p = NULL;
2322                 spin_unlock_bh(&adap->tid_release_lock);
2323
2324                 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2325                                          GFP_KERNEL)))
2326                         schedule_timeout_uninterruptible(1);
2327
2328                 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2329                 t4_ofld_send(adap, skb);
2330                 spin_lock_bh(&adap->tid_release_lock);
2331         }
2332         adap->tid_release_task_busy = false;
2333         spin_unlock_bh(&adap->tid_release_lock);
2334 }
2335
2336 /*
2337  * Release a TID and inform HW.  If we are unable to allocate the release
2338  * message, we defer to a work queue.
2339  */
2340 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2341 {
2342         void *old;
2343         struct sk_buff *skb;
2344         struct adapter *adap = container_of(t, struct adapter, tids);
2345
2346         old = t->tid_tab[tid];
2347         skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2348         if (likely(skb)) {
2349                 t->tid_tab[tid] = NULL;
2350                 mk_tid_release(skb, chan, tid);
2351                 t4_ofld_send(adap, skb);
2352         } else
2353                 cxgb4_queue_tid_release(t, chan, tid);
2354         if (old)
2355                 atomic_dec(&t->tids_in_use);
2356 }
2357 EXPORT_SYMBOL(cxgb4_remove_tid);
2358
2359 /*
2360  * Allocate and initialize the TID tables.  Returns 0 on success.
2361  */
2362 static int tid_init(struct tid_info *t)
2363 {
2364         size_t size;
2365         unsigned int natids = t->natids;
2366
2367         size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2368                t->nstids * sizeof(*t->stid_tab) +
2369                BITS_TO_LONGS(t->nstids) * sizeof(long);
2370         t->tid_tab = t4_alloc_mem(size);
2371         if (!t->tid_tab)
2372                 return -ENOMEM;
2373
2374         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2375         t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2376         t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2377         spin_lock_init(&t->stid_lock);
2378         spin_lock_init(&t->atid_lock);
2379
2380         t->stids_in_use = 0;
2381         t->afree = NULL;
2382         t->atids_in_use = 0;
2383         atomic_set(&t->tids_in_use, 0);
2384
2385         /* Set up the free list for atid_tab and clear the stid bitmap. */
2386         if (natids) {
2387                 while (--natids)
2388                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2389                 t->afree = t->atid_tab;
2390         }
2391         bitmap_zero(t->stid_bmap, t->nstids);
2392         return 0;
2393 }
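/*
 * Layout of the single allocation made by tid_init() (illustrative):
 *
 *   tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap
 *
 * i.e. the active-open table, server table and server bitmap are all
 * carved out of the one t4_alloc_mem() block sized above.
 */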
2394
2395 /**
2396  *      cxgb4_create_server - create an IP server
2397  *      @dev: the device
2398  *      @stid: the server TID
2399  *      @sip: local IP address to bind server to
2400  *      @sport: the server's TCP port
2401  *      @queue: queue to direct messages from this server to
2402  *
2403  *      Create an IP server for the given port and address.
2404  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
2405  */
2406 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2407                         __be32 sip, __be16 sport, unsigned int queue)
2408 {
2409         unsigned int chan;
2410         struct sk_buff *skb;
2411         struct adapter *adap;
2412         struct cpl_pass_open_req *req;
2413
2414         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2415         if (!skb)
2416                 return -ENOMEM;
2417
2418         adap = netdev2adap(dev);
2419         req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2420         INIT_TP_WR(req, 0);
2421         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2422         req->local_port = sport;
2423         req->peer_port = htons(0);
2424         req->local_ip = sip;
2425         req->peer_ip = htonl(0);
2426         chan = rxq_to_chan(&adap->sge, queue);
2427         req->opt0 = cpu_to_be64(TX_CHAN(chan));
2428         req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2429                                 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2430         return t4_mgmt_tx(adap, skb);
2431 }
2432 EXPORT_SYMBOL(cxgb4_create_server);
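/*
 * Minimal sketch (not compiled) of how an upper-layer driver might pair
 * cxgb4_alloc_stid() with cxgb4_create_server().  The function name and
 * parameters are hypothetical; "tids" would come from the cxgb4_lld_info
 * handed to the ULD at attach time.
 */
#if 0
static int example_start_listen(struct net_device *dev, struct tid_info *tids,
				__be32 sip, __be16 sport, unsigned int rxq)
{
	int stid = cxgb4_alloc_stid(tids, PF_INET, NULL);

	if (stid < 0)
		return -ENOMEM;
	return cxgb4_create_server(dev, stid, sip, sport, rxq);
}
#endif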
2433
2434 /**
2435  *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2436  *      @mtus: the HW MTU table
2437  *      @mtu: the target MTU
2438  *      @idx: index of selected entry in the MTU table
2439  *
2440  *      Returns the index and the value in the HW MTU table that is closest to
2441  *      but does not exceed @mtu, unless @mtu is smaller than any value in the
2442  *      table, in which case that smallest available value is selected.
2443  */
2444 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2445                             unsigned int *idx)
2446 {
2447         unsigned int i = 0;
2448
2449         while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2450                 ++i;
2451         if (idx)
2452                 *idx = i;
2453         return mtus[i];
2454 }
2455 EXPORT_SYMBOL(cxgb4_best_mtu);
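/*
 * Example (with a hypothetical three-entry table): for mtus = {576, 1500,
 * 9000}, cxgb4_best_mtu(mtus, 4000, &idx) returns 1500 with idx = 1, and a
 * request of 500, below the smallest entry, returns 576 with idx = 0.
 */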
2456
2457 /**
2458  *      cxgb4_port_chan - get the HW channel of a port
2459  *      @dev: the net device for the port
2460  *
2461  *      Return the HW Tx channel of the given port.
2462  */
2463 unsigned int cxgb4_port_chan(const struct net_device *dev)
2464 {
2465         return netdev2pinfo(dev)->tx_chan;
2466 }
2467 EXPORT_SYMBOL(cxgb4_port_chan);
2468
2469 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2470 {
2471         struct adapter *adap = netdev2adap(dev);
2472         u32 v;
2473
2474         v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2475         return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2476 }
2477 EXPORT_SYMBOL(cxgb4_dbfifo_count);
2478
2479 /**
2480  *      cxgb4_port_viid - get the VI id of a port
2481  *      @dev: the net device for the port
2482  *
2483  *      Return the VI id of the given port.
2484  */
2485 unsigned int cxgb4_port_viid(const struct net_device *dev)
2486 {
2487         return netdev2pinfo(dev)->viid;
2488 }
2489 EXPORT_SYMBOL(cxgb4_port_viid);
2490
2491 /**
2492  *      cxgb4_port_idx - get the index of a port
2493  *      @dev: the net device for the port
2494  *
2495  *      Return the index of the given port.
2496  */
2497 unsigned int cxgb4_port_idx(const struct net_device *dev)
2498 {
2499         return netdev2pinfo(dev)->port_id;
2500 }
2501 EXPORT_SYMBOL(cxgb4_port_idx);
2502
2503 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2504                          struct tp_tcp_stats *v6)
2505 {
2506         struct adapter *adap = pci_get_drvdata(pdev);
2507
2508         spin_lock(&adap->stats_lock);
2509         t4_tp_get_tcp_stats(adap, v4, v6);
2510         spin_unlock(&adap->stats_lock);
2511 }
2512 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2513
2514 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2515                       const unsigned int *pgsz_order)
2516 {
2517         struct adapter *adap = netdev2adap(dev);
2518
2519         t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2520         t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2521                      HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2522                      HPZ3(pgsz_order[3]));
2523 }
2524 EXPORT_SYMBOL(cxgb4_iscsi_init);
2525
2526 int cxgb4_flush_eq_cache(struct net_device *dev)
2527 {
2528         struct adapter *adap = netdev2adap(dev);
2529         int ret;
2530
2531         ret = t4_fwaddrspace_write(adap, adap->mbox,
2532                                    0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
2533         return ret;
2534 }
2535 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2536
2537 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2538 {
2539         u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
2540         __be64 indices;
2541         int ret;
2542
2543         ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2544         if (!ret) {
2545                 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2546                 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2547         }
2548         return ret;
2549 }
2550
2551 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2552                         u16 size)
2553 {
2554         struct adapter *adap = netdev2adap(dev);
2555         u16 hw_pidx, hw_cidx;
2556         int ret;
2557
2558         ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2559         if (ret)
2560                 goto out;
2561
2562         if (pidx != hw_pidx) {
2563                 u16 delta;
2564
2565                 if (pidx >= hw_pidx)
2566                         delta = pidx - hw_pidx;
2567                 else
2568                         delta = size - hw_pidx + pidx;
2569                 wmb();
2570                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2571                              QID(qid) | PIDX(delta));
2572         }
2573 out:
2574         return ret;
2575 }
2576 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
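/*
 * Wrap-around example for the delta computed above (illustrative): with a
 * 1024-entry queue, hw_pidx = 1020 and a driver pidx of 4, the doorbell is
 * rung with delta = 1024 - 1020 + 4 = 8 outstanding entries.
 */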
2577
2578 static struct pci_driver cxgb4_driver;
2579
2580 static void check_neigh_update(struct neighbour *neigh)
2581 {
2582         const struct device *parent;
2583         const struct net_device *netdev = neigh->dev;
2584
2585         if (netdev->priv_flags & IFF_802_1Q_VLAN)
2586                 netdev = vlan_dev_real_dev(netdev);
2587         parent = netdev->dev.parent;
2588         if (parent && parent->driver == &cxgb4_driver.driver)
2589                 t4_l2t_update(dev_get_drvdata(parent), neigh);
2590 }
2591
2592 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2593                        void *data)
2594 {
2595         switch (event) {
2596         case NETEVENT_NEIGH_UPDATE:
2597                 check_neigh_update(data);
2598                 break;
2599         case NETEVENT_REDIRECT:
2600         default:
2601                 break;
2602         }
2603         return 0;
2604 }
2605
2606 static bool netevent_registered;
2607 static struct notifier_block cxgb4_netevent_nb = {
2608         .notifier_call = netevent_cb
2609 };
2610
2611 static void drain_db_fifo(struct adapter *adap, int usecs)
2612 {
2613         u32 v;
2614
2615         do {
2616                 set_current_state(TASK_UNINTERRUPTIBLE);
2617                 schedule_timeout(usecs_to_jiffies(usecs));
2618                 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2619                 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2620                         break;
2621         } while (1);
2622 }
2623
2624 static void disable_txq_db(struct sge_txq *q)
2625 {
2626         spin_lock_irq(&q->db_lock);
2627         q->db_disabled = 1;
2628         spin_unlock_irq(&q->db_lock);
2629 }
2630
2631 static void enable_txq_db(struct sge_txq *q)
2632 {
2633         spin_lock_irq(&q->db_lock);
2634         q->db_disabled = 0;
2635         spin_unlock_irq(&q->db_lock);
2636 }
2637
2638 static void disable_dbs(struct adapter *adap)
2639 {
2640         int i;
2641
2642         for_each_ethrxq(&adap->sge, i)
2643                 disable_txq_db(&adap->sge.ethtxq[i].q);
2644         for_each_ofldrxq(&adap->sge, i)
2645                 disable_txq_db(&adap->sge.ofldtxq[i].q);
2646         for_each_port(adap, i)
2647                 disable_txq_db(&adap->sge.ctrlq[i].q);
2648 }
2649
2650 static void enable_dbs(struct adapter *adap)
2651 {
2652         int i;
2653
2654         for_each_ethrxq(&adap->sge, i)
2655                 enable_txq_db(&adap->sge.ethtxq[i].q);
2656         for_each_ofldrxq(&adap->sge, i)
2657                 enable_txq_db(&adap->sge.ofldtxq[i].q);
2658         for_each_port(adap, i)
2659                 enable_txq_db(&adap->sge.ctrlq[i].q);
2660 }
2661
2662 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2663 {
2664         u16 hw_pidx, hw_cidx;
2665         int ret;
2666
2667         spin_lock_bh(&q->db_lock);
2668         ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2669         if (ret)
2670                 goto out;
2671         if (q->db_pidx != hw_pidx) {
2672                 u16 delta;
2673
2674                 if (q->db_pidx >= hw_pidx)
2675                         delta = q->db_pidx - hw_pidx;
2676                 else
2677                         delta = q->size - hw_pidx + q->db_pidx;
2678                 wmb();
2679                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2680                              QID(q->cntxt_id) | PIDX(delta));
2681         }
2682 out:
2683         q->db_disabled = 0;
2684         spin_unlock_bh(&q->db_lock);
2685         if (ret)
2686                 CH_WARN(adap, "DB drop recovery failed.\n");
2687 }
2688 static void recover_all_queues(struct adapter *adap)
2689 {
2690         int i;
2691
2692         for_each_ethrxq(&adap->sge, i)
2693                 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2694         for_each_ofldrxq(&adap->sge, i)
2695                 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2696         for_each_port(adap, i)
2697                 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2698 }
2699
2700 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2701 {
2702         mutex_lock(&uld_mutex);
2703         if (adap->uld_handle[CXGB4_ULD_RDMA])
2704                 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2705                                 cmd);
2706         mutex_unlock(&uld_mutex);
2707 }
2708
2709 static void process_db_full(struct work_struct *work)
2710 {
2711         struct adapter *adap;
2712
2713         adap = container_of(work, struct adapter, db_full_task);
2714
2715         notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2716         drain_db_fifo(adap, dbfifo_drain_delay);
2717         t4_set_reg_field(adap, SGE_INT_ENABLE3,
2718                          DBFIFO_HP_INT | DBFIFO_LP_INT,
2719                          DBFIFO_HP_INT | DBFIFO_LP_INT);
2720         notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2721 }
2722
2723 static void process_db_drop(struct work_struct *work)
2724 {
2725         struct adapter *adap;
2726
2727         adap = container_of(work, struct adapter, db_drop_task);
2728
2729         t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
2730         disable_dbs(adap);
2731         notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2732         drain_db_fifo(adap, 1);
2733         recover_all_queues(adap);
2734         enable_dbs(adap);
2735 }
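/*
 * Doorbell drop recovery, in order: clear the dropped-doorbell status, stop
 * new doorbells, tell the RDMA ULD to back off, let the FIFO drain, resync
 * every egress queue's pidx with hardware via sync_txq_pidx(), then re-enable
 * doorbells.
 */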
2736
2737 void t4_db_full(struct adapter *adap)
2738 {
2739         t4_set_reg_field(adap, SGE_INT_ENABLE3,
2740                          DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
2741         queue_work(workq, &adap->db_full_task);
2742 }
2743
2744 void t4_db_dropped(struct adapter *adap)
2745 {
2746         queue_work(workq, &adap->db_drop_task);
2747 }
2748
2749 static void uld_attach(struct adapter *adap, unsigned int uld)
2750 {
2751         void *handle;
2752         struct cxgb4_lld_info lli;
2753
2754         lli.pdev = adap->pdev;
2755         lli.l2t = adap->l2t;
2756         lli.tids = &adap->tids;
2757         lli.ports = adap->port;
2758         lli.vr = &adap->vres;
2759         lli.mtus = adap->params.mtus;
2760         if (uld == CXGB4_ULD_RDMA) {
2761                 lli.rxq_ids = adap->sge.rdma_rxq;
2762                 lli.nrxq = adap->sge.rdmaqs;
2763         } else if (uld == CXGB4_ULD_ISCSI) {
2764                 lli.rxq_ids = adap->sge.ofld_rxq;
2765                 lli.nrxq = adap->sge.ofldqsets;
2766         }
2767         lli.ntxq = adap->sge.ofldqsets;
2768         lli.nchan = adap->params.nports;
2769         lli.nports = adap->params.nports;
2770         lli.wr_cred = adap->params.ofldq_wr_cred;
2771         lli.adapter_type = adap->params.rev;
2772         lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2773         lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2774                         t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2775                         (adap->fn * 4));
2776         lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2777                         t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2778                         (adap->fn * 4));
2779         lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2780         lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2781         lli.fw_vers = adap->params.fw_vers;
2782         lli.dbfifo_int_thresh = dbfifo_int_thresh;
2783
2784         handle = ulds[uld].add(&lli);
2785         if (IS_ERR(handle)) {
2786                 dev_warn(adap->pdev_dev,
2787                          "could not attach to the %s driver, error %ld\n",
2788                          uld_str[uld], PTR_ERR(handle));
2789                 return;
2790         }
2791
2792         adap->uld_handle[uld] = handle;
2793
2794         if (!netevent_registered) {
2795                 register_netevent_notifier(&cxgb4_netevent_nb);
2796                 netevent_registered = true;
2797         }
2798
2799         if (adap->flags & FULL_INIT_DONE)
2800                 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2801 }
2802
2803 static void attach_ulds(struct adapter *adap)
2804 {
2805         unsigned int i;
2806
2807         mutex_lock(&uld_mutex);
2808         list_add_tail(&adap->list_node, &adapter_list);
2809         for (i = 0; i < CXGB4_ULD_MAX; i++)
2810                 if (ulds[i].add)
2811                         uld_attach(adap, i);
2812         mutex_unlock(&uld_mutex);
2813 }
2814
2815 static void detach_ulds(struct adapter *adap)
2816 {
2817         unsigned int i;
2818
2819         mutex_lock(&uld_mutex);
2820         list_del(&adap->list_node);
2821         for (i = 0; i < CXGB4_ULD_MAX; i++)
2822                 if (adap->uld_handle[i]) {
2823                         ulds[i].state_change(adap->uld_handle[i],
2824                                              CXGB4_STATE_DETACH);
2825                         adap->uld_handle[i] = NULL;
2826                 }
2827         if (netevent_registered && list_empty(&adapter_list)) {
2828                 unregister_netevent_notifier(&cxgb4_netevent_nb);
2829                 netevent_registered = false;
2830         }
2831         mutex_unlock(&uld_mutex);
2832 }
2833
2834 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2835 {
2836         unsigned int i;
2837
2838         mutex_lock(&uld_mutex);
2839         for (i = 0; i < CXGB4_ULD_MAX; i++)
2840                 if (adap->uld_handle[i])
2841                         ulds[i].state_change(adap->uld_handle[i], new_state);
2842         mutex_unlock(&uld_mutex);
2843 }
2844
2845 /**
2846  *      cxgb4_register_uld - register an upper-layer driver
2847  *      @type: the ULD type
2848  *      @p: the ULD methods
2849  *
2850  *      Registers an upper-layer driver with this driver and notifies the ULD
2851  *      about any presently available devices that support its type.  Returns
2852  *      %-EBUSY if a ULD of the same type is already registered.
2853  */
2854 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2855 {
2856         int ret = 0;
2857         struct adapter *adap;
2858
2859         if (type >= CXGB4_ULD_MAX)
2860                 return -EINVAL;
2861         mutex_lock(&uld_mutex);
2862         if (ulds[type].add) {
2863                 ret = -EBUSY;
2864                 goto out;
2865         }
2866         ulds[type] = *p;
2867         list_for_each_entry(adap, &adapter_list, list_node)
2868                 uld_attach(adap, type);
2869 out:    mutex_unlock(&uld_mutex);
2870         return ret;
2871 }
2872 EXPORT_SYMBOL(cxgb4_register_uld);
2873
2874 /**
2875  *      cxgb4_unregister_uld - unregister an upper-layer driver
2876  *      @type: the ULD type
2877  *
2878  *      Unregisters an existing upper-layer driver.
2879  */
2880 int cxgb4_unregister_uld(enum cxgb4_uld type)
2881 {
2882         struct adapter *adap;
2883
2884         if (type >= CXGB4_ULD_MAX)
2885                 return -EINVAL;
2886         mutex_lock(&uld_mutex);
2887         list_for_each_entry(adap, &adapter_list, list_node)
2888                 adap->uld_handle[type] = NULL;
2889         ulds[type].add = NULL;
2890         mutex_unlock(&uld_mutex);
2891         return 0;
2892 }
2893 EXPORT_SYMBOL(cxgb4_unregister_uld);
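/*
 * Typical use (illustrative): a ULD module fills in a cxgb4_uld_info with
 * its callbacks (at least .add and .state_change, plus its receive handler
 * for offload traffic) and calls cxgb4_register_uld() from its module init;
 * uld_attach() above then hands it a cxgb4_lld_info for every adapter that
 * has already been probed.  cxgb4_unregister_uld() undoes this at exit.
 */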
2894
2895 /**
2896  *      cxgb_up - enable the adapter
2897  *      @adap: adapter being enabled
2898  *
2899  *      Called when the first port is enabled, this function performs the
2900  *      actions necessary to make an adapter operational, such as completing
2901  *      the initialization of HW modules and enabling interrupts.
2902  *
2903  *      Must be called with the rtnl lock held.
2904  */
2905 static int cxgb_up(struct adapter *adap)
2906 {
2907         int err;
2908
2909         err = setup_sge_queues(adap);
2910         if (err)
2911                 goto out;
2912         err = setup_rss(adap);
2913         if (err)
2914                 goto freeq;
2915
2916         if (adap->flags & USING_MSIX) {
2917                 name_msix_vecs(adap);
2918                 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2919                                   adap->msix_info[0].desc, adap);
2920                 if (err)
2921                         goto irq_err;
2922
2923                 err = request_msix_queue_irqs(adap);
2924                 if (err) {
2925                         free_irq(adap->msix_info[0].vec, adap);
2926                         goto irq_err;
2927                 }
2928         } else {
2929                 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2930                                   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2931                                   adap->port[0]->name, adap);
2932                 if (err)
2933                         goto irq_err;
2934         }
2935         enable_rx(adap);
2936         t4_sge_start(adap);
2937         t4_intr_enable(adap);
2938         adap->flags |= FULL_INIT_DONE;
2939         notify_ulds(adap, CXGB4_STATE_UP);
2940  out:
2941         return err;
2942  irq_err:
2943         dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2944  freeq:
2945         t4_free_sge_resources(adap);
2946         goto out;
2947 }
2948
2949 static void cxgb_down(struct adapter *adapter)
2950 {
2951         t4_intr_disable(adapter);
2952         cancel_work_sync(&adapter->tid_release_task);
2953         cancel_work_sync(&adapter->db_full_task);
2954         cancel_work_sync(&adapter->db_drop_task);
2955         adapter->tid_release_task_busy = false;
2956         adapter->tid_release_head = NULL;
2957
2958         if (adapter->flags & USING_MSIX) {
2959                 free_msix_queue_irqs(adapter);
2960                 free_irq(adapter->msix_info[0].vec, adapter);
2961         } else
2962                 free_irq(adapter->pdev->irq, adapter);
2963         quiesce_rx(adapter);
2964         t4_sge_stop(adapter);
2965         t4_free_sge_resources(adapter);
2966         adapter->flags &= ~FULL_INIT_DONE;
2967 }
2968
2969 /*
2970  * net_device operations
2971  */
2972 static int cxgb_open(struct net_device *dev)
2973 {
2974         int err;
2975         struct port_info *pi = netdev_priv(dev);
2976         struct adapter *adapter = pi->adapter;
2977
2978         netif_carrier_off(dev);
2979
2980         if (!(adapter->flags & FULL_INIT_DONE)) {
2981                 err = cxgb_up(adapter);
2982                 if (err < 0)
2983                         return err;
2984         }
2985
2986         err = link_start(dev);
2987         if (!err)
2988                 netif_tx_start_all_queues(dev);
2989         return err;
2990 }
2991
2992 static int cxgb_close(struct net_device *dev)
2993 {
2994         struct port_info *pi = netdev_priv(dev);
2995         struct adapter *adapter = pi->adapter;
2996
2997         netif_tx_stop_all_queues(dev);
2998         netif_carrier_off(dev);
2999         return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3000 }
3001
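/*
 * Return 64-bit port statistics.  The MAC counters are read under
 * stats_lock and mapped onto the fields of struct rtnl_link_stats64;
 * counters with no hardware equivalent are reported as zero.
 */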
3002 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
3003                                                 struct rtnl_link_stats64 *ns)
3004 {
3005         struct port_stats stats;
3006         struct port_info *p = netdev_priv(dev);
3007         struct adapter *adapter = p->adapter;
3008
3009         spin_lock(&adapter->stats_lock);
3010         t4_get_port_stats(adapter, p->tx_chan, &stats);
3011         spin_unlock(&adapter->stats_lock);
3012
3013         ns->tx_bytes   = stats.tx_octets;
3014         ns->tx_packets = stats.tx_frames;
3015         ns->rx_bytes   = stats.rx_octets;
3016         ns->rx_packets = stats.rx_frames;
3017         ns->multicast  = stats.rx_mcast_frames;
3018
3019         /* detailed rx_errors */
3020         ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3021                                stats.rx_runt;
3022         ns->rx_over_errors   = 0;
3023         ns->rx_crc_errors    = stats.rx_fcs_err;
3024         ns->rx_frame_errors  = stats.rx_symbol_err;
3025         ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
3026                                stats.rx_ovflow2 + stats.rx_ovflow3 +
3027                                stats.rx_trunc0 + stats.rx_trunc1 +
3028                                stats.rx_trunc2 + stats.rx_trunc3;
3029         ns->rx_missed_errors = 0;
3030
3031         /* detailed tx_errors */
3032         ns->tx_aborted_errors   = 0;
3033         ns->tx_carrier_errors   = 0;
3034         ns->tx_fifo_errors      = 0;
3035         ns->tx_heartbeat_errors = 0;
3036         ns->tx_window_errors    = 0;
3037
3038         ns->tx_errors = stats.tx_error_frames;
3039         ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3040                 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3041         return ns;
3042 }
3043
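/*
 * Implement the MII ioctls.  SIOCGMIIPHY reports the port's MDIO address,
 * while SIOCGMIIREG/SIOCSMIIREG read or write a PHY register through the
 * firmware mailbox; both clause-22 and clause-45 (MDIO_PHY_ID_C45) phy_ids
 * are accepted.  A userspace caller typically reaches this through the
 * generic MII ioctl path, roughly (illustrative sketch only; sock_fd, prtad,
 * devad and reg are placeholders):
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	mii->phy_id = mdio_phy_id_c45(prtad, devad);	/* clause-45 address */
 *	mii->reg_num = reg;
 *	ioctl(sock_fd, SIOCGMIIREG, &ifr);	/* value returned in val_out */
 */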
3044 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3045 {
3046         unsigned int mbox;
3047         int ret = 0, prtad, devad;
3048         struct port_info *pi = netdev_priv(dev);
3049         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3050
3051         switch (cmd) {
3052         case SIOCGMIIPHY:
3053                 if (pi->mdio_addr < 0)
3054                         return -EOPNOTSUPP;
3055                 data->phy_id = pi->mdio_addr;
3056                 break;
3057         case SIOCGMIIREG:
3058         case SIOCSMIIREG:
3059                 if (mdio_phy_id_is_c45(data->phy_id)) {
3060                         prtad = mdio_phy_id_prtad(data->phy_id);
3061                         devad = mdio_phy_id_devad(data->phy_id);
3062                 } else if (data->phy_id < 32) {
3063                         prtad = data->phy_id;
3064                         devad = 0;
3065                         data->reg_num &= 0x1f;
3066                 } else
3067                         return -EINVAL;
3068
3069                 mbox = pi->adapter->fn;
3070                 if (cmd == SIOCGMIIREG)
3071                         ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3072                                          data->reg_num, &data->val_out);
3073                 else
3074                         ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3075                                          data->reg_num, data->val_in);
3076                 break;
3077         default:
3078                 return -EOPNOTSUPP;
3079         }
3080         return ret;
3081 }
3082
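/*
 * Synchronize the hardware with the netdev's RX mode (promiscuous,
 * all-multicast and the MAC address filters), leaving the MTU unchanged.
 */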
3083 static void cxgb_set_rxmode(struct net_device *dev)
3084 {
3085         /* unfortunately we can't return errors to the stack */
3086         set_rxmode(dev, -1, false);
3087 }
3088
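/*
 * Change a port's MTU.  The new value is range-checked and then pushed to
 * the hardware through t4_set_rxmode() before dev->mtu is updated.
 */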
3089 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3090 {
3091         int ret;
3092         struct port_info *pi = netdev_priv(dev);
3093
3094         if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
3095                 return -EINVAL;
3096         ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
3097                             -1, -1, -1, true);
3098         if (!ret)
3099                 dev->mtu = new_mtu;
3100         return ret;
3101 }
3102
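/*
 * Set a port's MAC address.  The address is validated and programmed into
 * the VI's exact-match filter via t4_change_mac(), which returns the filter
 * index to remember for later address changes.
 */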
3103 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3104 {
3105         int ret;
3106         struct sockaddr *addr = p;
3107         struct port_info *pi = netdev_priv(dev);
3108
3109         if (!is_valid_ether_addr(addr->sa_data))
3110                 return -EADDRNOTAVAIL;
3111
3112         ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
3113                             pi->xact_addr_filt, addr->sa_data, true, true);
3114         if (ret < 0)
3115                 return ret;
3116
3117         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3118         pi->xact_addr_filt = ret;
3119         return 0;
3120 }
3121
3122 #ifdef CONFIG_NET_POLL_CONTROLLER
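/*
 * netpoll entry point: service the port's ingress queues directly so that
 * netconsole and friends can make progress while normal interrupt delivery
 * is disabled.
 */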
3123 static void cxgb_netpoll(struct net_device *dev)
3124 {
3125         struct port_info *pi = netdev_priv(dev);
3126         struct adapter *adap = pi->adapter;
3127
3128         if (adap->flags & USING_MSIX) {