2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <asm/uaccess.h>
71 #define DRV_VERSION "1.3.0-ko"
72 #define DRV_DESC "Chelsio T4 Network Driver"
75 * Max interrupt hold-off timer value in us. Queues fall back to this value
76 * under extreme memory pressure so it's largish to give the system time to free up resources.
79 #define MAX_SGE_TIMERVAL 200U
83 * Physical Function provisioning constants.
85 PFRES_NVI = 4, /* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
87 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
95 PFRES_WX_CAPS = FW_CMD_CAP_PF,
99 * Virtual Function provisioning constants. We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102 * neither will have Free Lists associated with them. For each
103 * Ethernet/Control Egress Queue and for each Free List, we need an Egress Context.
106 VFRES_NPORTS = 1, /* # of "ports" per VF */
107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
112 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
113 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
114 VFRES_TC = 0, /* PCI-E traffic class */
115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
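/*
 * Concretely, the values above follow the accounting just described: one
 * ingress queue with a Free List per "Queue Set" plus the two extra
 * interrupt-only ingress queues, and one Egress Context per Ethernet TX
 * queue and per Free List. A trivial standalone sketch (not driver code,
 * names are illustrative) of that arithmetic:
 */
static unsigned int example_vf_ingress_queues(unsigned int nqsets)
{
	/* one ingress queue w/ Free List per queue set, plus the Firmware
	 * Event Queue and the Forwarded Interrupt Queue (cf. VFRES_NIQFLINT)
	 */
	return nqsets + 2;
}

static unsigned int example_vf_egress_contexts(unsigned int nqsets)
{
	/* one Egress Context per Ethernet TX queue and one per Free List
	 * (cf. VFRES_NEQ)
	 */
	return nqsets * 2;
}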
123 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
124 * static and likely not to be useful in the long run. We really need to
125 * implement some form of persistent configuration which the firmware controls.
128 static unsigned int pfvfres_pmask(struct adapter *adapter,
129 unsigned int pf, unsigned int vf)
131 unsigned int portn, portvec;
134 * Give PF's access to all of the ports.
137 return FW_PFVF_CMD_PMASK_MASK;
140 * For VFs, we'll assign them access to the ports based purely on the
141 * PF. We assign active ports in order, wrapping around if there are
142 * fewer active ports than PFs: e.g. active port[pf % nports].
143 * Unfortunately the adapter's port_info structs haven't been
144 * initialized yet so we have to compute this.
146 if (adapter->params.nports == 0)
149 portn = pf % adapter->params.nports;
150 portvec = adapter->params.portvec;
153 * Isolate the lowest set bit in the port vector. If we're at
154 * the port number that we want, return that as the pmask.
155 * Otherwise, mask that bit out of the port vector and
156 * decrement our port number ...
158 unsigned int pmask = portvec ^ (portvec & (portvec-1));
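/*
 * The expression above isolates the lowest set bit of the port vector
 * (portvec & (portvec - 1) clears that bit, and the XOR recovers it). As a
 * rough standalone sketch (not driver code, names are illustrative), the
 * whole assignment scheme -- wrap the PF number onto the active ports and
 * walk the port vector until the chosen port's bit is reached -- looks like:
 */
static unsigned int example_pmask(unsigned int portvec, unsigned int pf,
				  unsigned int nports)
{
	unsigned int portn = pf % nports;	/* active port[pf % nports] */

	while (portvec) {
		/* isolate the lowest set bit of the port vector */
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));

		if (portn == 0)
			return pmask;		/* this is the port we want */
		portvec &= ~pmask;		/* drop it and keep walking */
		portn--;
	}
	return 0;				/* no active ports */
}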
168 MAX_TXQ_ENTRIES = 16384,
169 MAX_CTRL_TXQ_ENTRIES = 1024,
170 MAX_RSPQ_ENTRIES = 16384,
171 MAX_RX_BUFFERS = 16384,
172 MIN_TXQ_ENTRIES = 32,
173 MIN_CTRL_TXQ_ENTRIES = 32,
174 MIN_RSPQ_ENTRIES = 128,
178 /* Host shadow copy of ingress filter entry. This is in host native format
180 * and doesn't match the ordering or bit order, etc. of the hardware or the
180 * firmware command. The use of bit-field structure elements is purely to
181 * remind ourselves of the field size limitations and save memory in the case
182 * where the filter table is large.
184 struct filter_entry {
185 /* Administrative fields for filter. */
187 u32 valid:1; /* filter allocated and valid */
188 u32 locked:1; /* filter is administratively locked */
190 u32 pending:1; /* filter action is pending firmware reply */
191 u32 smtidx:8; /* Source MAC Table index for smac */
192 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
194 /* The filter itself. Most of this is a straight copy of information
195 * provided by the extended ioctl(). Some fields are translated to
196 * internal forms -- for instance the Ingress Queue ID passed in from
197 * the ioctl() is translated into the Absolute Ingress Queue ID.
199 struct ch_filter_specification fs;
202 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
203 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
204 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
206 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
208 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
209 CH_DEVICE(0xa000, 0), /* PE10K */
210 CH_DEVICE(0x4001, -1),
211 CH_DEVICE(0x4002, -1),
212 CH_DEVICE(0x4003, -1),
213 CH_DEVICE(0x4004, -1),
214 CH_DEVICE(0x4005, -1),
215 CH_DEVICE(0x4006, -1),
216 CH_DEVICE(0x4007, -1),
217 CH_DEVICE(0x4008, -1),
218 CH_DEVICE(0x4009, -1),
219 CH_DEVICE(0x400a, -1),
220 CH_DEVICE(0x4401, 4),
221 CH_DEVICE(0x4402, 4),
222 CH_DEVICE(0x4403, 4),
223 CH_DEVICE(0x4404, 4),
224 CH_DEVICE(0x4405, 4),
225 CH_DEVICE(0x4406, 4),
226 CH_DEVICE(0x4407, 4),
227 CH_DEVICE(0x4408, 4),
228 CH_DEVICE(0x4409, 4),
229 CH_DEVICE(0x440a, 4),
230 CH_DEVICE(0x440d, 4),
231 CH_DEVICE(0x440e, 4),
235 #define FW_FNAME "cxgb4/t4fw.bin"
236 #define FW_CFNAME "cxgb4/t4-config.txt"
238 MODULE_DESCRIPTION(DRV_DESC);
239 MODULE_AUTHOR("Chelsio Communications");
240 MODULE_LICENSE("Dual BSD/GPL");
241 MODULE_VERSION(DRV_VERSION);
242 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243 MODULE_FIRMWARE(FW_FNAME);
246 * Normally we're willing to become the firmware's Master PF but will be happy
247 * if another PF has already become the Master and initialized the adapter.
248 * Setting "force_init" will cause this driver to forcibly establish itself as
249 * the Master PF and initialize the adapter.
251 static uint force_init;
253 module_param(force_init, uint, 0644);
254 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
257 * Normally if the firmware we connect to has Configuration File support, we
258 * use that and only fall back to the old Driver-based initialization if the
259 * Configuration File fails for some reason. If force_old_init is set, then
260 * we'll always use the old Driver-based initialization sequence.
262 static uint force_old_init;
264 module_param(force_old_init, uint, 0644);
265 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
267 static int dflt_msg_enable = DFLT_MSG_ENABLE;
269 module_param(dflt_msg_enable, int, 0644);
270 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
273 * The driver uses the best interrupt scheme available on a platform in the
274 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
275 * of these schemes the driver may consider as follows:
277 * msi = 2: choose from among all three options
278 * msi = 1: only consider MSI and INTx interrupts
279 * msi = 0: force INTx interrupts
283 module_param(msi, int, 0644);
284 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
287 * Queue interrupt hold-off timer values. Queues default to the first of these upon creation.
290 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
292 module_param_array(intr_holdoff, uint, NULL, 0644);
293 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
294 "0..4 in microseconds");
296 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
298 module_param_array(intr_cnt, uint, NULL, 0644);
299 MODULE_PARM_DESC(intr_cnt,
300 "thresholds 1..3 for queue interrupt packet counters");
303 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
304 * offset by 2 bytes in order to have the IP headers line up on 4-byte
305 * boundaries. This is a requirement for many architectures which will throw
306 * a machine check fault if an attempt is made to access one of the 4-byte IP
307 * header fields on a non-4-byte boundary. And it's a major performance issue
308 * even on some architectures which allow it, like some implementations of the
309 * x86 ISA. However, some architectures don't mind this and for some very
310 * edge-case performance sensitive applications (like forwarding large volumes
311 * of small packets), setting this DMA offset to 0 will decrease the number of
312 * PCI-E Bus transfers enough to measurably affect performance.
314 static int rx_dma_offset = 2;
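/*
 * As a concrete illustration of the default: an Ethernet header is 14 bytes,
 * so delivering the frame 2 bytes into an aligned buffer puts the IP header
 * at byte 16, which is 4-byte aligned. A tiny standalone check (not driver
 * code, assuming assert() from <assert.h>):
 */
static void example_check_rx_dma_offset(void)
{
	const unsigned int eth_hlen = 14;	/* Ethernet header bytes */
	const unsigned int rx_off = 2;		/* the driver's default */

	assert((rx_off + eth_hlen) % 4 == 0);	/* 16: IP header aligned */
	assert((0 + eth_hlen) % 4 != 0);	/* 14: misaligned without pad */
}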
318 #ifdef CONFIG_PCI_IOV
319 module_param(vf_acls, bool, 0644);
320 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
322 static unsigned int num_vf[4];
324 module_param_array(num_vf, uint, NULL, 0644);
325 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
329 * The filter TCAM has a fixed portion and a variable portion. The fixed
330 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
331 * ports. The variable portion is 36 bits which can include things like Exact
332 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
333 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
334 * far exceed the 36-bit budget for this "compressed" header portion of the
335 * filter. Thus, we have a scarce resource which must be carefully managed.
337 * By default we set this up to mostly match the set of filter matching
338 * capabilities of T3 but with accommodations for some of T4's more
339 * interesting features:
341 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
342 * [Inner] VLAN (17), Port (3), FCoE (1) }
345 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
346 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
347 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
350 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
352 module_param(tp_vlan_pri_map, uint, 0644);
353 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
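/*
 * For reference, the default selection quoted above fits the budget:
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 of the 36 available bits. A standalone sketch
 * (not driver code) of that accounting:
 */
static unsigned int example_default_filter_tuple_bits(void)
{
	const unsigned int frag_bits  = 1;	/* IP Fragment */
	const unsigned int mps_bits   = 3;	/* MPS Match Type */
	const unsigned int proto_bits = 8;	/* IP Protocol */
	const unsigned int vlan_bits  = 17;	/* [Inner] VLAN */
	const unsigned int port_bits  = 3;	/* Port */
	const unsigned int fcoe_bits  = 1;	/* FCoE */

	return frag_bits + mps_bits + proto_bits + vlan_bits +
	       port_bits + fcoe_bits;		/* 33 <= 36 */
}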
355 static struct dentry *cxgb4_debugfs_root;
357 static LIST_HEAD(adapter_list);
358 static DEFINE_MUTEX(uld_mutex);
359 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
360 static const char *uld_str[] = { "RDMA", "iSCSI" };
362 static void link_report(struct net_device *dev)
364 if (!netif_carrier_ok(dev))
365 netdev_info(dev, "link down\n");
367 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
369 const char *s = "10Mbps";
370 const struct port_info *p = netdev_priv(dev);
372 switch (p->link_cfg.speed) {
384 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
389 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
391 struct net_device *dev = adapter->port[port_id];
393 /* Skip changes from disabled ports. */
394 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
396 netif_carrier_on(dev);
398 netif_carrier_off(dev);
404 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
406 static const char *mod_str[] = {
407 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
410 const struct net_device *dev = adap->port[port_id];
411 const struct port_info *pi = netdev_priv(dev);
413 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
414 netdev_info(dev, "port module unplugged\n");
415 else if (pi->mod_type < ARRAY_SIZE(mod_str))
416 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
420 * Configure the exact and hash address filters to handle a port's multicast
421 * and secondary unicast MAC addresses.
423 static int set_addr_filters(const struct net_device *dev, bool sleep)
431 const struct netdev_hw_addr *ha;
432 int uc_cnt = netdev_uc_count(dev);
433 int mc_cnt = netdev_mc_count(dev);
434 const struct port_info *pi = netdev_priv(dev);
435 unsigned int mb = pi->adapter->fn;
437 /* first do the secondary unicast addresses */
438 netdev_for_each_uc_addr(ha, dev) {
439 addr[naddr++] = ha->addr;
440 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
441 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
442 naddr, addr, filt_idx, &uhash, sleep);
451 /* next set up the multicast addresses */
452 netdev_for_each_mc_addr(ha, dev) {
453 addr[naddr++] = ha->addr;
454 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
455 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
456 naddr, addr, filt_idx, &mhash, sleep);
465 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
466 uhash | mhash, sleep);
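/*
 * Both loops above follow the same pattern: collect addresses into a small
 * on-stack array and flush it to the hardware (t4_alloc_mac_filt()) whenever
 * it fills up or the list ends; whatever exceeds the exact-match capacity is
 * folded into the hash filter. A simplified standalone sketch (not driver
 * code; the flush callback and batch size are illustrative) of that batching:
 */
static int example_batch_addresses(const unsigned char (*addrs)[6], size_t count,
				   int (*flush)(const unsigned char **batch,
						size_t n))
{
	const unsigned char *batch[7];		/* small fixed-size batch */
	size_t naddr = 0, i;
	int ret;

	for (i = 0; i < count; i++) {
		batch[naddr++] = addrs[i];
		if (naddr == sizeof(batch) / sizeof(batch[0]) ||
		    i == count - 1) {
			ret = flush(batch, naddr);	/* write this batch */
			if (ret < 0)
				return ret;
			naddr = 0;			/* start a new one */
		}
	}
	return 0;
}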
469 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
470 module_param(dbfifo_int_thresh, int, 0644);
471 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
474 * usecs to sleep while draining the dbfifo
476 static int dbfifo_drain_delay = 1000;
477 module_param(dbfifo_drain_delay, int, 0644);
478 MODULE_PARM_DESC(dbfifo_drain_delay,
479 "usecs to sleep while draining the dbfifo");
482 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
483 * If @mtu is -1 it is left unchanged.
485 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
488 struct port_info *pi = netdev_priv(dev);
490 ret = set_addr_filters(dev, sleep_ok);
492 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
493 (dev->flags & IFF_PROMISC) ? 1 : 0,
494 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
499 static struct workqueue_struct *workq;
502 * link_start - enable a port
503 * @dev: the port to enable
505 * Performs the MAC and PHY actions needed to enable a port.
507 static int link_start(struct net_device *dev)
510 struct port_info *pi = netdev_priv(dev);
511 unsigned int mb = pi->adapter->fn;
514 * We do not set address filters and promiscuity here; the stack does
515 * that step explicitly.
517 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
518 !!(dev->features & NETIF_F_HW_VLAN_RX), true);
520 ret = t4_change_mac(pi->adapter, mb, pi->viid,
521 pi->xact_addr_filt, dev->dev_addr, true,
524 pi->xact_addr_filt = ret;
529 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
532 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
536 /* Clear a filter and release any of its resources that we own. This also
537 * clears the filter's "pending" status.
539 static void clear_filter(struct adapter *adap, struct filter_entry *f)
541 /* If the new or old filter has loopback rewriting rules then we'll
542 * need to free any existing Layer Two Table (L2T) entries of the old
543 * filter rule. The firmware will handle freeing up any Source MAC
544 * Table (SMT) entries used for rewriting Source MAC Addresses in loopback rules.
548 cxgb4_l2t_release(f->l2t);
550 /* The zeroing of the filter rule below clears the filter valid,
551 * pending, locked flags, l2t pointer, etc. so it's all we need for this operation.
554 memset(f, 0, sizeof(*f));
557 /* Handle a filter write/deletion reply.
559 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
561 unsigned int idx = GET_TID(rpl);
562 unsigned int nidx = idx - adap->tids.ftid_base;
564 struct filter_entry *f;
566 if (idx >= adap->tids.ftid_base && nidx <
567 (adap->tids.nftids + adap->tids.nsftids)) {
569 ret = GET_TCB_COOKIE(rpl->cookie);
570 f = &adap->tids.ftid_tab[idx];
572 if (ret == FW_FILTER_WR_FLT_DELETED) {
573 /* Clear the filter when we get confirmation from the
574 * hardware that the filter has been deleted.
576 clear_filter(adap, f);
577 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
578 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
580 clear_filter(adap, f);
581 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
582 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
583 f->pending = 0; /* asynchronous setup completed */
586 /* Something went wrong. Issue a warning about the
587 * problem and clear everything out.
589 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
591 clear_filter(adap, f);
596 /* Response queue handler for the FW event queue.
598 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
599 const struct pkt_gl *gl)
601 u8 opcode = ((const struct rss_header *)rsp)->opcode;
603 rsp++; /* skip RSS header */
604 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
605 const struct cpl_sge_egr_update *p = (void *)rsp;
606 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
609 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
611 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
612 struct sge_eth_txq *eq;
614 eq = container_of(txq, struct sge_eth_txq, q);
615 netif_tx_wake_queue(eq->txq);
617 struct sge_ofld_txq *oq;
619 oq = container_of(txq, struct sge_ofld_txq, q);
620 tasklet_schedule(&oq->qresume_tsk);
622 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
623 const struct cpl_fw6_msg *p = (void *)rsp;
626 t4_handle_fw_rpl(q->adap, p->data);
627 } else if (opcode == CPL_L2T_WRITE_RPL) {
628 const struct cpl_l2t_write_rpl *p = (void *)rsp;
630 do_l2t_write_rpl(q->adap, p);
631 } else if (opcode == CPL_SET_TCB_RPL) {
632 const struct cpl_set_tcb_rpl *p = (void *)rsp;
634 filter_rpl(q->adap, p);
636 dev_err(q->adap->pdev_dev,
637 "unexpected CPL %#x on FW event queue\n", opcode);
642 * uldrx_handler - response queue handler for ULD queues
643 * @q: the response queue that received the packet
644 * @rsp: the response queue descriptor holding the offload message
645 * @gl: the gather list of packet fragments
647 * Deliver an ingress offload packet to a ULD. All processing is done by
648 * the ULD, we just maintain statistics.
650 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
651 const struct pkt_gl *gl)
653 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
655 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
661 else if (gl == CXGB4_MSG_AN)
668 static void disable_msi(struct adapter *adapter)
670 if (adapter->flags & USING_MSIX) {
671 pci_disable_msix(adapter->pdev);
672 adapter->flags &= ~USING_MSIX;
673 } else if (adapter->flags & USING_MSI) {
674 pci_disable_msi(adapter->pdev);
675 adapter->flags &= ~USING_MSI;
680 * Interrupt handler for non-data events used with MSI-X.
682 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
684 struct adapter *adap = cookie;
686 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
689 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
691 t4_slow_intr_handler(adap);
696 * Name the MSI-X interrupts.
698 static void name_msix_vecs(struct adapter *adap)
700 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
702 /* non-data interrupts */
703 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
706 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
707 adap->port[0]->name);
709 /* Ethernet queues */
710 for_each_port(adap, j) {
711 struct net_device *d = adap->port[j];
712 const struct port_info *pi = netdev_priv(d);
714 for (i = 0; i < pi->nqsets; i++, msi_idx++)
715 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
720 for_each_ofldrxq(&adap->sge, i)
721 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
722 adap->port[0]->name, i);
724 for_each_rdmarxq(&adap->sge, i)
725 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
726 adap->port[0]->name, i);
729 static int request_msix_queue_irqs(struct adapter *adap)
731 struct sge *s = &adap->sge;
732 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
734 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
735 adap->msix_info[1].desc, &s->fw_evtq);
739 for_each_ethrxq(s, ethqidx) {
740 err = request_irq(adap->msix_info[msi_index].vec,
742 adap->msix_info[msi_index].desc,
743 &s->ethrxq[ethqidx].rspq);
748 for_each_ofldrxq(s, ofldqidx) {
749 err = request_irq(adap->msix_info[msi_index].vec,
751 adap->msix_info[msi_index].desc,
752 &s->ofldrxq[ofldqidx].rspq);
757 for_each_rdmarxq(s, rdmaqidx) {
758 err = request_irq(adap->msix_info[msi_index].vec,
760 adap->msix_info[msi_index].desc,
761 &s->rdmarxq[rdmaqidx].rspq);
769 while (--rdmaqidx >= 0)
770 free_irq(adap->msix_info[--msi_index].vec,
771 &s->rdmarxq[rdmaqidx].rspq);
772 while (--ofldqidx >= 0)
773 free_irq(adap->msix_info[--msi_index].vec,
774 &s->ofldrxq[ofldqidx].rspq);
775 while (--ethqidx >= 0)
776 free_irq(adap->msix_info[--msi_index].vec,
777 &s->ethrxq[ethqidx].rspq);
778 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
782 static void free_msix_queue_irqs(struct adapter *adap)
784 int i, msi_index = 2;
785 struct sge *s = &adap->sge;
787 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
788 for_each_ethrxq(s, i)
789 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
790 for_each_ofldrxq(s, i)
791 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
792 for_each_rdmarxq(s, i)
793 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
797 * write_rss - write the RSS table for a given port
799 * @queues: array of queue indices for RSS
801 * Sets up the portion of the HW RSS table for the port's VI to distribute
802 * packets to the Rx queues in @queues.
804 static int write_rss(const struct port_info *pi, const u16 *queues)
808 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
810 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
814 /* map the queue indices to queue ids */
815 for (i = 0; i < pi->rss_size; i++, queues++)
816 rss[i] = q[*queues].rspq.abs_id;
818 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
819 pi->rss_size, rss, pi->rss_size);
825 * setup_rss - configure RSS
828 * Sets up RSS for each port.
830 static int setup_rss(struct adapter *adap)
834 for_each_port(adap, i) {
835 const struct port_info *pi = adap2pinfo(adap, i);
837 err = write_rss(pi, pi->rss);
845 * Return the channel of the ingress queue with the given qid.
847 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
849 qid -= p->ingr_start;
850 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
854 * Wait until all NAPI handlers are descheduled.
856 static void quiesce_rx(struct adapter *adap)
860 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
861 struct sge_rspq *q = adap->sge.ingr_map[i];
864 napi_disable(&q->napi);
869 * Enable NAPI scheduling and interrupt generation for all Rx queues.
871 static void enable_rx(struct adapter *adap)
875 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
876 struct sge_rspq *q = adap->sge.ingr_map[i];
881 napi_enable(&q->napi);
882 /* 0-increment GTS to start the timer and enable interrupts */
883 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
884 SEINTARM(q->intr_params) |
885 INGRESSQID(q->cntxt_id));
890 * setup_sge_queues - configure SGE Tx/Rx/response queues
893 * Determines how many sets of SGE queues to use and initializes them.
894 * We support multiple queue sets per port if we have MSI-X, otherwise
895 * just one queue set per port.
897 static int setup_sge_queues(struct adapter *adap)
899 int err, msi_idx, i, j;
900 struct sge *s = &adap->sge;
902 bitmap_zero(s->starving_fl, MAX_EGRQ);
903 bitmap_zero(s->txq_maperr, MAX_EGRQ);
905 if (adap->flags & USING_MSIX)
906 msi_idx = 1; /* vector 0 is for non-queue interrupts */
908 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
912 msi_idx = -((int)s->intrq.abs_id + 1);
915 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
916 msi_idx, NULL, fwevtq_handler);
918 freeout: t4_free_sge_resources(adap);
922 for_each_port(adap, i) {
923 struct net_device *dev = adap->port[i];
924 struct port_info *pi = netdev_priv(dev);
925 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
926 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
928 for (j = 0; j < pi->nqsets; j++, q++) {
931 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
937 memset(&q->stats, 0, sizeof(q->stats));
939 for (j = 0; j < pi->nqsets; j++, t++) {
940 err = t4_sge_alloc_eth_txq(adap, t, dev,
941 netdev_get_tx_queue(dev, j),
942 s->fw_evtq.cntxt_id);
948 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
949 for_each_ofldrxq(s, i) {
950 struct sge_ofld_rxq *q = &s->ofldrxq[i];
951 struct net_device *dev = adap->port[i / j];
955 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
956 &q->fl, uldrx_handler);
959 memset(&q->stats, 0, sizeof(q->stats));
960 s->ofld_rxq[i] = q->rspq.abs_id;
961 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
962 s->fw_evtq.cntxt_id);
967 for_each_rdmarxq(s, i) {
968 struct sge_ofld_rxq *q = &s->rdmarxq[i];
972 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
973 msi_idx, &q->fl, uldrx_handler);
976 memset(&q->stats, 0, sizeof(q->stats));
977 s->rdma_rxq[i] = q->rspq.abs_id;
980 for_each_port(adap, i) {
982 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
983 * have RDMA queues, and that's the right value.
985 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
987 s->rdmarxq[i].rspq.cntxt_id);
992 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
993 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
994 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
999 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1000 * started but failed, and a negative errno if flash load couldn't start.
1002 static int upgrade_fw(struct adapter *adap)
1006 const struct fw_hdr *hdr;
1007 const struct firmware *fw;
1008 struct device *dev = adap->pdev_dev;
1010 ret = request_firmware(&fw, FW_FNAME, dev);
1012 dev_err(dev, "unable to load firmware image " FW_FNAME
1013 ", error %d\n", ret);
1017 hdr = (const struct fw_hdr *)fw->data;
1018 vers = ntohl(hdr->fw_ver);
1019 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
1020 ret = -EINVAL; /* wrong major version, won't do */
1025 * If the flash FW is unusable or we found something newer, load it.
1027 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
1028 vers > adap->params.fw_vers) {
1029 dev_info(dev, "upgrading firmware ...\n");
1030 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1033 dev_info(dev, "firmware successfully upgraded to "
1034 FW_FNAME " (%d.%d.%d.%d)\n",
1035 FW_HDR_FW_VER_MAJOR_GET(vers),
1036 FW_HDR_FW_VER_MINOR_GET(vers),
1037 FW_HDR_FW_VER_MICRO_GET(vers),
1038 FW_HDR_FW_VER_BUILD_GET(vers));
1040 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1043 * Tell our caller that we didn't upgrade the firmware.
1048 out: release_firmware(fw);
1053 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1054 * The allocated memory is cleared.
1056 void *t4_alloc_mem(size_t size)
1058 void *p = kzalloc(size, GFP_KERNEL);
1066 * Free memory allocated through t4_alloc_mem().
1068 static void t4_free_mem(void *addr)
1070 if (is_vmalloc_addr(addr))
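/*
 * Both helpers are shown only in part here; the usual shape of this
 * kzalloc-with-vmalloc-fallback pair (a sketch, assuming vzalloc()/vfree()
 * for the large-allocation path) is:
 */
static void *example_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);	/* also returns zeroed memory */
	return p;
}

static void example_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}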
1076 /* Send a Work Request to write the filter at a specified index. We construct
1077 * a Firmware Filter Work Request to have the work done and put the indicated
1078 * filter into "pending" mode which will prevent any further actions against
1079 * it until we get a reply from the firmware on the completion status of the request.
1082 static int set_filter_wr(struct adapter *adapter, int fidx)
1084 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1085 struct sk_buff *skb;
1086 struct fw_filter_wr *fwr;
1089 /* If the new filter requires loopback Destination MAC and/or VLAN
1090 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for the filter.
1093 if (f->fs.newdmac || f->fs.newvlan) {
1094 /* allocate L2T entry for new filter */
1095 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1098 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1099 f->fs.eport, f->fs.dmac)) {
1100 cxgb4_l2t_release(f->l2t);
1106 ftid = adapter->tids.ftid_base + fidx;
1108 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1109 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1110 memset(fwr, 0, sizeof(*fwr));
1112 /* It would be nice to put most of the following in t4_hw.c but most
1113 * of the work is translating the cxgbtool ch_filter_specification
1114 * into the Work Request and the definition of that structure is
1115 * currently in cxgbtool.h which isn't appropriate to pull into the
1116 * common code. We may eventually try to come up with a more neutral
1117 * filter specification structure but for now it's easiest to simply
1118 * put this fairly direct code in line ...
1120 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1121 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1123 htonl(V_FW_FILTER_WR_TID(ftid) |
1124 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1125 V_FW_FILTER_WR_NOREPLY(0) |
1126 V_FW_FILTER_WR_IQ(f->fs.iq));
1127 fwr->del_filter_to_l2tix =
1128 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1129 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1130 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1131 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1132 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1133 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1134 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1135 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1136 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1137 f->fs.newvlan == VLAN_REWRITE) |
1138 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1139 f->fs.newvlan == VLAN_REWRITE) |
1140 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1141 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1142 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1143 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1144 fwr->ethtype = htons(f->fs.val.ethtype);
1145 fwr->ethtypem = htons(f->fs.mask.ethtype);
1146 fwr->frag_to_ovlan_vldm =
1147 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1148 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1149 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1150 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1151 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1152 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1154 fwr->rx_chan_rx_rpl_iq =
1155 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1156 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1157 fwr->maci_to_matchtypem =
1158 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1159 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1160 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1161 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1162 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1163 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1164 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1165 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1166 fwr->ptcl = f->fs.val.proto;
1167 fwr->ptclm = f->fs.mask.proto;
1168 fwr->ttyp = f->fs.val.tos;
1169 fwr->ttypm = f->fs.mask.tos;
1170 fwr->ivlan = htons(f->fs.val.ivlan);
1171 fwr->ivlanm = htons(f->fs.mask.ivlan);
1172 fwr->ovlan = htons(f->fs.val.ovlan);
1173 fwr->ovlanm = htons(f->fs.mask.ovlan);
1174 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1175 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1176 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1177 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1178 fwr->lp = htons(f->fs.val.lport);
1179 fwr->lpm = htons(f->fs.mask.lport);
1180 fwr->fp = htons(f->fs.val.fport);
1181 fwr->fpm = htons(f->fs.mask.fport);
1183 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1185 /* Mark the filter as "pending" and ship off the Filter Work Request.
1186 * When we get the Work Request Reply we'll clear the pending status.
1189 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1190 t4_ofld_send(adapter, skb);
1194 /* Delete the filter at a specified index.
1196 static int del_filter_wr(struct adapter *adapter, int fidx)
1198 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1199 struct sk_buff *skb;
1200 struct fw_filter_wr *fwr;
1201 unsigned int len, ftid;
1204 ftid = adapter->tids.ftid_base + fidx;
1206 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1207 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1208 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1210 /* Mark the filter as "pending" and ship off the Filter Work Request.
1211 * When we get the Work Request Reply we'll clear the pending status.
1214 t4_mgmt_tx(adapter, skb);
1218 static inline int is_offload(const struct adapter *adap)
1220 return adap->params.offload;
1224 * Implementation of ethtool operations.
1227 static u32 get_msglevel(struct net_device *dev)
1229 return netdev2adap(dev)->msg_enable;
1232 static void set_msglevel(struct net_device *dev, u32 val)
1234 netdev2adap(dev)->msg_enable = val;
1237 static char stats_strings[][ETH_GSTRING_LEN] = {
1240 "TxBroadcastFrames ",
1241 "TxMulticastFrames ",
1247 "TxFrames128To255 ",
1248 "TxFrames256To511 ",
1249 "TxFrames512To1023 ",
1250 "TxFrames1024To1518 ",
1251 "TxFrames1519ToMax ",
1266 "RxBroadcastFrames ",
1267 "RxMulticastFrames ",
1279 "RxFrames128To255 ",
1280 "RxFrames256To511 ",
1281 "RxFrames512To1023 ",
1282 "RxFrames1024To1518 ",
1283 "RxFrames1519ToMax ",
1295 "RxBG0FramesDropped ",
1296 "RxBG1FramesDropped ",
1297 "RxBG2FramesDropped ",
1298 "RxBG3FramesDropped ",
1299 "RxBG0FramesTrunc ",
1300 "RxBG1FramesTrunc ",
1301 "RxBG2FramesTrunc ",
1302 "RxBG3FramesTrunc ",
1313 static int get_sset_count(struct net_device *dev, int sset)
1317 return ARRAY_SIZE(stats_strings);
1323 #define T4_REGMAP_SIZE (160 * 1024)
1325 static int get_regs_len(struct net_device *dev)
1327 return T4_REGMAP_SIZE;
1330 static int get_eeprom_len(struct net_device *dev)
1335 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1337 struct adapter *adapter = netdev2adap(dev);
1339 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1340 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1341 strlcpy(info->bus_info, pci_name(adapter->pdev),
1342 sizeof(info->bus_info));
1344 if (adapter->params.fw_vers)
1345 snprintf(info->fw_version, sizeof(info->fw_version),
1346 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1347 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1348 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1349 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1350 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1351 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1352 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1353 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1354 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1357 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1359 if (stringset == ETH_SS_STATS)
1360 memcpy(data, stats_strings, sizeof(stats_strings));
1364 * port stats maintained per queue of the port. They should be in the same
1365 * order as in stats_strings above.
1367 struct queue_port_stats {
1377 static void collect_sge_port_stats(const struct adapter *adap,
1378 const struct port_info *p, struct queue_port_stats *s)
1381 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1382 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1384 memset(s, 0, sizeof(*s));
1385 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1387 s->tx_csum += tx->tx_cso;
1388 s->rx_csum += rx->stats.rx_cso;
1389 s->vlan_ex += rx->stats.vlan_ex;
1390 s->vlan_ins += tx->vlan_ins;
1391 s->gro_pkts += rx->stats.lro_pkts;
1392 s->gro_merged += rx->stats.lro_merged;
1396 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1399 struct port_info *pi = netdev_priv(dev);
1400 struct adapter *adapter = pi->adapter;
1402 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1404 data += sizeof(struct port_stats) / sizeof(u64);
1405 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1409 * Return a version number to identify the type of adapter. The scheme is:
1410 * - bits 0..9: chip version
1411 * - bits 10..15: chip revision
1412 * - bits 16..23: register dump version
1414 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1416 return 4 | (ap->params.rev << 10) | (1 << 16);
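/*
 * Given that layout (chip version in bits 0..9, revision in 10..15, register
 * dump version in 16..23), the word can be unpacked as in this standalone
 * sketch (not driver code, names are illustrative):
 */
struct example_adap_vers {
	unsigned int chip_version;	/* bits 0..9   */
	unsigned int chip_revision;	/* bits 10..15 */
	unsigned int regdump_version;	/* bits 16..23 */
};

static struct example_adap_vers example_decode_adap_vers(unsigned int v)
{
	struct example_adap_vers f = {
		.chip_version    = v & 0x3ff,
		.chip_revision   = (v >> 10) & 0x3f,
		.regdump_version = (v >> 16) & 0xff,
	};
	return f;
}
/* e.g. a T4 rev 0 adapter encodes as 4 | (0 << 10) | (1 << 16) = 0x10004 */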
1419 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1422 u32 *p = buf + start;
1424 for ( ; start <= end; start += sizeof(u32))
1425 *p++ = t4_read_reg(ap, start);
1428 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1431 static const unsigned int reg_ranges[] = {
1652 struct adapter *ap = netdev2adap(dev);
1654 regs->version = mk_adap_vers(ap);
1656 memset(buf, 0, T4_REGMAP_SIZE);
1657 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1658 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1661 static int restart_autoneg(struct net_device *dev)
1663 struct port_info *p = netdev_priv(dev);
1665 if (!netif_running(dev))
1667 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1669 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1673 static int identify_port(struct net_device *dev,
1674 enum ethtool_phys_id_state state)
1677 struct adapter *adap = netdev2adap(dev);
1679 if (state == ETHTOOL_ID_ACTIVE)
1681 else if (state == ETHTOOL_ID_INACTIVE)
1686 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1689 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1693 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1694 type == FW_PORT_TYPE_BT_XAUI) {
1696 if (caps & FW_PORT_CAP_SPEED_100M)
1697 v |= SUPPORTED_100baseT_Full;
1698 if (caps & FW_PORT_CAP_SPEED_1G)
1699 v |= SUPPORTED_1000baseT_Full;
1700 if (caps & FW_PORT_CAP_SPEED_10G)
1701 v |= SUPPORTED_10000baseT_Full;
1702 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1703 v |= SUPPORTED_Backplane;
1704 if (caps & FW_PORT_CAP_SPEED_1G)
1705 v |= SUPPORTED_1000baseKX_Full;
1706 if (caps & FW_PORT_CAP_SPEED_10G)
1707 v |= SUPPORTED_10000baseKX4_Full;
1708 } else if (type == FW_PORT_TYPE_KR)
1709 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1710 else if (type == FW_PORT_TYPE_BP_AP)
1711 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1712 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1713 else if (type == FW_PORT_TYPE_BP4_AP)
1714 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1715 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1716 SUPPORTED_10000baseKX4_Full;
1717 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1718 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1719 v |= SUPPORTED_FIBRE;
1721 if (caps & FW_PORT_CAP_ANEG)
1722 v |= SUPPORTED_Autoneg;
1726 static unsigned int to_fw_linkcaps(unsigned int caps)
1730 if (caps & ADVERTISED_100baseT_Full)
1731 v |= FW_PORT_CAP_SPEED_100M;
1732 if (caps & ADVERTISED_1000baseT_Full)
1733 v |= FW_PORT_CAP_SPEED_1G;
1734 if (caps & ADVERTISED_10000baseT_Full)
1735 v |= FW_PORT_CAP_SPEED_10G;
1739 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1741 const struct port_info *p = netdev_priv(dev);
1743 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1744 p->port_type == FW_PORT_TYPE_BT_XFI ||
1745 p->port_type == FW_PORT_TYPE_BT_XAUI)
1746 cmd->port = PORT_TP;
1747 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1748 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1749 cmd->port = PORT_FIBRE;
1750 else if (p->port_type == FW_PORT_TYPE_SFP) {
1751 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1752 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1753 cmd->port = PORT_DA;
1755 cmd->port = PORT_FIBRE;
1757 cmd->port = PORT_OTHER;
1759 if (p->mdio_addr >= 0) {
1760 cmd->phy_address = p->mdio_addr;
1761 cmd->transceiver = XCVR_EXTERNAL;
1762 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1763 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1765 cmd->phy_address = 0; /* not really, but no better option */
1766 cmd->transceiver = XCVR_INTERNAL;
1767 cmd->mdio_support = 0;
1770 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1771 cmd->advertising = from_fw_linkcaps(p->port_type,
1772 p->link_cfg.advertising);
1773 ethtool_cmd_speed_set(cmd,
1774 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1775 cmd->duplex = DUPLEX_FULL;
1776 cmd->autoneg = p->link_cfg.autoneg;
1782 static unsigned int speed_to_caps(int speed)
1784 if (speed == SPEED_100)
1785 return FW_PORT_CAP_SPEED_100M;
1786 if (speed == SPEED_1000)
1787 return FW_PORT_CAP_SPEED_1G;
1788 if (speed == SPEED_10000)
1789 return FW_PORT_CAP_SPEED_10G;
1793 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1796 struct port_info *p = netdev_priv(dev);
1797 struct link_config *lc = &p->link_cfg;
1798 u32 speed = ethtool_cmd_speed(cmd);
1800 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1803 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1805 * PHY offers a single speed. See if that's what's
1808 if (cmd->autoneg == AUTONEG_DISABLE &&
1809 (lc->supported & speed_to_caps(speed)))
1814 if (cmd->autoneg == AUTONEG_DISABLE) {
1815 cap = speed_to_caps(speed);
1817 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1818 (speed == SPEED_10000))
1820 lc->requested_speed = cap;
1821 lc->advertising = 0;
1823 cap = to_fw_linkcaps(cmd->advertising);
1824 if (!(lc->supported & cap))
1826 lc->requested_speed = 0;
1827 lc->advertising = cap | FW_PORT_CAP_ANEG;
1829 lc->autoneg = cmd->autoneg;
1831 if (netif_running(dev))
1832 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1837 static void get_pauseparam(struct net_device *dev,
1838 struct ethtool_pauseparam *epause)
1840 struct port_info *p = netdev_priv(dev);
1842 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1843 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1844 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1847 static int set_pauseparam(struct net_device *dev,
1848 struct ethtool_pauseparam *epause)
1850 struct port_info *p = netdev_priv(dev);
1851 struct link_config *lc = &p->link_cfg;
1853 if (epause->autoneg == AUTONEG_DISABLE)
1854 lc->requested_fc = 0;
1855 else if (lc->supported & FW_PORT_CAP_ANEG)
1856 lc->requested_fc = PAUSE_AUTONEG;
1860 if (epause->rx_pause)
1861 lc->requested_fc |= PAUSE_RX;
1862 if (epause->tx_pause)
1863 lc->requested_fc |= PAUSE_TX;
1864 if (netif_running(dev))
1865 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1870 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1872 const struct port_info *pi = netdev_priv(dev);
1873 const struct sge *s = &pi->adapter->sge;
1875 e->rx_max_pending = MAX_RX_BUFFERS;
1876 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1877 e->rx_jumbo_max_pending = 0;
1878 e->tx_max_pending = MAX_TXQ_ENTRIES;
1880 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1881 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1882 e->rx_jumbo_pending = 0;
1883 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1886 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1889 const struct port_info *pi = netdev_priv(dev);
1890 struct adapter *adapter = pi->adapter;
1891 struct sge *s = &adapter->sge;
1893 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1894 e->tx_pending > MAX_TXQ_ENTRIES ||
1895 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1896 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1897 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1900 if (adapter->flags & FULL_INIT_DONE)
1903 for (i = 0; i < pi->nqsets; ++i) {
1904 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1905 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1906 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1911 static int closest_timer(const struct sge *s, int time)
1913 int i, delta, match = 0, min_delta = INT_MAX;
1915 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1916 delta = time - s->timer_val[i];
1919 if (delta < min_delta) {
1927 static int closest_thres(const struct sge *s, int thres)
1929 int i, delta, match = 0, min_delta = INT_MAX;
1931 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1932 delta = thres - s->counter_val[i];
1935 if (delta < min_delta) {
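/*
 * closest_timer() and closest_thres() are truncated above; both implement
 * the same idea: scan a small table and return the index of the entry
 * closest (by absolute difference) to the requested value. A standalone
 * sketch (not driver code, assuming INT_MAX from <limits.h>):
 */
static int example_closest_index(const unsigned int *tbl, size_t len, int want)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < (int)len; i++) {
		delta = want - (int)tbl[i];
		if (delta < 0)
			delta = -delta;		/* absolute difference */
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}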
1944 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1946 static unsigned int qtimer_val(const struct adapter *adap,
1947 const struct sge_rspq *q)
1949 unsigned int idx = q->intr_params >> 1;
1951 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1955 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1956 * @adap: the adapter
1958 * @us: the hold-off time in us, or 0 to disable timer
1959 * @cnt: the hold-off packet count, or 0 to disable counter
1961 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1962 * one of the two needs to be enabled for the queue to generate interrupts.
1964 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1965 unsigned int us, unsigned int cnt)
1967 if ((us | cnt) == 0)
1974 new_idx = closest_thres(&adap->sge, cnt);
1975 if (q->desc && q->pktcnt_idx != new_idx) {
1976 /* the queue has already been created, update it */
1977 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1978 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1979 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1980 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1985 q->pktcnt_idx = new_idx;
1988 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1989 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1993 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1995 const struct port_info *pi = netdev_priv(dev);
1996 struct adapter *adap = pi->adapter;
1998 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1999 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
2002 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2004 const struct port_info *pi = netdev_priv(dev);
2005 const struct adapter *adap = pi->adapter;
2006 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2008 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2009 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2010 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2015 * eeprom_ptov - translate a physical EEPROM address to virtual
2016 * @phys_addr: the physical EEPROM address
2017 * @fn: the PCI function number
2018 * @sz: size of function-specific area
2020 * Translate a physical EEPROM address to virtual. The first 1K is
2021 * accessed through virtual addresses starting at 31K, the rest is
2022 * accessed through virtual addresses starting at 0.
2024 * The mapping is as follows:
2025 * [0..1K) -> [31K..32K)
2026 * [1K..1K+A) -> [31K-A..31K)
2027 * [1K+A..ES) -> [0..ES-A-1K)
2029 * where A = @fn * @sz, and ES = EEPROM size.
2031 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2034 if (phys_addr < 1024)
2035 return phys_addr + (31 << 10);
2036 if (phys_addr < 1024 + fn)
2037 return 31744 - fn + phys_addr - 1024;
2038 if (phys_addr < EEPROMSIZE)
2039 return phys_addr - 1024 - fn;
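/*
 * The mapping documented above (with A = @fn * @sz) can be exercised on its
 * own; a standalone sketch (not driver code), assuming a 16KB EEPROM purely
 * for illustration:
 */
#define EXAMPLE_EEPROMSIZE 16384		/* assumed for the sketch */

static int example_eeprom_ptov(unsigned int phys_addr, unsigned int fn,
			       unsigned int sz)
{
	fn *= sz;				/* A = fn * sz */
	if (phys_addr < 1024)			/* [0..1K)    -> [31K..32K)   */
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)		/* [1K..1K+A) -> [31K-A..31K) */
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EXAMPLE_EEPROMSIZE)	/* [1K+A..ES) -> [0..ES-A-1K) */
		return phys_addr - 1024 - fn;
	return -1;				/* out of range */
}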
2044 * The next two routines implement eeprom read/write from physical addresses.
2046 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2048 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2051 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2052 return vaddr < 0 ? vaddr : 0;
2055 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2057 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2060 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2061 return vaddr < 0 ? vaddr : 0;
2064 #define EEPROM_MAGIC 0x38E2F10C
2066 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2070 struct adapter *adapter = netdev2adap(dev);
2072 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2076 e->magic = EEPROM_MAGIC;
2077 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2078 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2081 memcpy(data, buf + e->offset, e->len);
2086 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2091 u32 aligned_offset, aligned_len, *p;
2092 struct adapter *adapter = netdev2adap(dev);
2094 if (eeprom->magic != EEPROM_MAGIC)
2097 aligned_offset = eeprom->offset & ~3;
2098 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2100 if (adapter->fn > 0) {
2101 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2103 if (aligned_offset < start ||
2104 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2108 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2110 * RMW possibly needed for first or last words.
2112 buf = kmalloc(aligned_len, GFP_KERNEL);
2115 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2116 if (!err && aligned_len > 4)
2117 err = eeprom_rd_phys(adapter,
2118 aligned_offset + aligned_len - 4,
2119 (u32 *)&buf[aligned_len - 4]);
2122 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2126 err = t4_seeprom_wp(adapter, false);
2130 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2131 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2132 aligned_offset += 4;
2136 err = t4_seeprom_wp(adapter, true);
2143 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2146 const struct firmware *fw;
2147 struct adapter *adap = netdev2adap(netdev);
2149 ef->data[sizeof(ef->data) - 1] = '\0';
2150 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2154 ret = t4_load_fw(adap, fw->data, fw->size);
2155 release_firmware(fw);
2157 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2161 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2162 #define BCAST_CRC 0xa0ccc1a6
2164 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2166 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2167 wol->wolopts = netdev2adap(dev)->wol;
2168 memset(&wol->sopass, 0, sizeof(wol->sopass));
2171 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2174 struct port_info *pi = netdev_priv(dev);
2176 if (wol->wolopts & ~WOL_SUPPORTED)
2178 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2179 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2180 if (wol->wolopts & WAKE_BCAST) {
2181 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2184 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2185 ~6ULL, ~0ULL, BCAST_CRC, true);
2187 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2191 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2193 const struct port_info *pi = netdev_priv(dev);
2194 netdev_features_t changed = dev->features ^ features;
2197 if (!(changed & NETIF_F_HW_VLAN_RX))
2200 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2202 !!(features & NETIF_F_HW_VLAN_RX), true);
2204 dev->features = features ^ NETIF_F_HW_VLAN_RX;
2208 static u32 get_rss_table_size(struct net_device *dev)
2210 const struct port_info *pi = netdev_priv(dev);
2212 return pi->rss_size;
2215 static int get_rss_table(struct net_device *dev, u32 *p)
2217 const struct port_info *pi = netdev_priv(dev);
2218 unsigned int n = pi->rss_size;
2225 static int set_rss_table(struct net_device *dev, const u32 *p)
2228 struct port_info *pi = netdev_priv(dev);
2230 for (i = 0; i < pi->rss_size; i++)
2232 if (pi->adapter->flags & FULL_INIT_DONE)
2233 return write_rss(pi, pi->rss);
2237 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2240 const struct port_info *pi = netdev_priv(dev);
2242 switch (info->cmd) {
2243 case ETHTOOL_GRXFH: {
2244 unsigned int v = pi->rss_mode;
2247 switch (info->flow_type) {
2249 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2250 info->data = RXH_IP_SRC | RXH_IP_DST |
2251 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2252 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2253 info->data = RXH_IP_SRC | RXH_IP_DST;
2256 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2257 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2258 info->data = RXH_IP_SRC | RXH_IP_DST |
2259 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2260 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2261 info->data = RXH_IP_SRC | RXH_IP_DST;
2264 case AH_ESP_V4_FLOW:
2266 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2267 info->data = RXH_IP_SRC | RXH_IP_DST;
2270 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2271 info->data = RXH_IP_SRC | RXH_IP_DST |
2272 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2273 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2274 info->data = RXH_IP_SRC | RXH_IP_DST;
2277 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2278 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2279 info->data = RXH_IP_SRC | RXH_IP_DST |
2280 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2281 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2282 info->data = RXH_IP_SRC | RXH_IP_DST;
2285 case AH_ESP_V6_FLOW:
2287 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2288 info->data = RXH_IP_SRC | RXH_IP_DST;
2293 case ETHTOOL_GRXRINGS:
2294 info->data = pi->nqsets;
2300 static const struct ethtool_ops cxgb_ethtool_ops = {
2301 .get_settings = get_settings,
2302 .set_settings = set_settings,
2303 .get_drvinfo = get_drvinfo,
2304 .get_msglevel = get_msglevel,
2305 .set_msglevel = set_msglevel,
2306 .get_ringparam = get_sge_param,
2307 .set_ringparam = set_sge_param,
2308 .get_coalesce = get_coalesce,
2309 .set_coalesce = set_coalesce,
2310 .get_eeprom_len = get_eeprom_len,
2311 .get_eeprom = get_eeprom,
2312 .set_eeprom = set_eeprom,
2313 .get_pauseparam = get_pauseparam,
2314 .set_pauseparam = set_pauseparam,
2315 .get_link = ethtool_op_get_link,
2316 .get_strings = get_strings,
2317 .set_phys_id = identify_port,
2318 .nway_reset = restart_autoneg,
2319 .get_sset_count = get_sset_count,
2320 .get_ethtool_stats = get_stats,
2321 .get_regs_len = get_regs_len,
2322 .get_regs = get_regs,
2325 .get_rxnfc = get_rxnfc,
2326 .get_rxfh_indir_size = get_rss_table_size,
2327 .get_rxfh_indir = get_rss_table,
2328 .set_rxfh_indir = set_rss_table,
2329 .flash_device = set_flash,
2335 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2339 loff_t avail = file->f_path.dentry->d_inode->i_size;
2340 unsigned int mem = (uintptr_t)file->private_data & 3;
2341 struct adapter *adap = file->private_data - mem;
2347 if (count > avail - pos)
2348 count = avail - pos;
2356 ret = t4_mc_read(adap, pos, data, NULL);
2358 ret = t4_edc_read(adap, mem, pos, data, NULL);
2362 ofst = pos % sizeof(data);
2363 len = min(count, sizeof(data) - ofst);
2364 if (copy_to_user(buf, (u8 *)data + ofst, len))
2371 count = pos - *ppos;
2376 static const struct file_operations mem_debugfs_fops = {
2377 .owner = THIS_MODULE,
2378 .open = simple_open,
2380 .llseek = default_llseek,
2383 static void add_debugfs_mem(struct adapter *adap, const char *name,
2384 unsigned int idx, unsigned int size_mb)
2388 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2389 (void *)adap + idx, &mem_debugfs_fops);
2390 if (de && de->d_inode)
2391 de->d_inode->i_size = size_mb << 20;
2394 static int setup_debugfs(struct adapter *adap)
2398 if (IS_ERR_OR_NULL(adap->debugfs_root))
2401 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2402 if (i & EDRAM0_ENABLE)
2403 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2404 if (i & EDRAM1_ENABLE)
2405 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2406 if (i & EXT_MEM_ENABLE)
2407 add_debugfs_mem(adap, "mc", MEM_MC,
2408 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2410 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2416 * upper-layer driver support
2420 * Allocate an active-open TID and set it to the supplied value.
2422 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2426 spin_lock_bh(&t->atid_lock);
2428 union aopen_entry *p = t->afree;
2430 atid = (p - t->atid_tab) + t->atid_base;
2435 spin_unlock_bh(&t->atid_lock);
2438 EXPORT_SYMBOL(cxgb4_alloc_atid);
2441 * Release an active-open TID.
2443 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2445 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2447 spin_lock_bh(&t->atid_lock);
2451 spin_unlock_bh(&t->atid_lock);
2453 EXPORT_SYMBOL(cxgb4_free_atid);
2456 * Allocate a server TID and set it to the supplied value.
2458 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2462 spin_lock_bh(&t->stid_lock);
2463 if (family == PF_INET) {
2464 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2465 if (stid < t->nstids)
2466 __set_bit(stid, t->stid_bmap);
2470 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2475 t->stid_tab[stid].data = data;
2476 stid += t->stid_base;
2479 spin_unlock_bh(&t->stid_lock);
2482 EXPORT_SYMBOL(cxgb4_alloc_stid);
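/*
 * Illustrative sketch (plain C, not the kernel bitmap API) of the STID
 * allocation policy in cxgb4_alloc_stid(): an IPv4 server takes a single
 * bitmap bit, while the IPv6 path reserves a naturally aligned group of
 * four, which is what bitmap_find_free_region(..., order = 2) provides.
 * The table size and helper below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define NSTIDS 16
static bool used[NSTIDS];

/* find and claim a free, naturally aligned run of (1 << order) slots */
static int alloc_region(unsigned int order)
{
	unsigned int n = 1u << order, i, j;

	for (i = 0; i + n <= NSTIDS; i += n) {
		for (j = 0; j < n && !used[i + j]; j++)
			;
		if (j == n) {
			for (j = 0; j < n; j++)
				used[i + j] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	printf("IPv4 stid %d\n", alloc_region(0));	/* one slot        */
	printf("IPv6 stid %d\n", alloc_region(2));	/* four, aligned   */
	printf("IPv4 stid %d\n", alloc_region(0));
	return 0;
}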
2484 /* Allocate a server filter TID and set it to the supplied value.
2486 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2490 spin_lock_bh(&t->stid_lock);
2491 if (family == PF_INET) {
2492 stid = find_next_zero_bit(t->stid_bmap,
2493 t->nstids + t->nsftids, t->nstids);
2494 if (stid < (t->nstids + t->nsftids))
2495 __set_bit(stid, t->stid_bmap);
2502 t->stid_tab[stid].data = data;
2503 stid += t->stid_base;
2506 spin_unlock_bh(&t->stid_lock);
2509 EXPORT_SYMBOL(cxgb4_alloc_sftid);
2511 /* Release a server TID.
2513 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2515 stid -= t->stid_base;
2516 spin_lock_bh(&t->stid_lock);
2517 if (family == PF_INET)
2518 __clear_bit(stid, t->stid_bmap);
2520 bitmap_release_region(t->stid_bmap, stid, 2);
2521 t->stid_tab[stid].data = NULL;
2523 spin_unlock_bh(&t->stid_lock);
2525 EXPORT_SYMBOL(cxgb4_free_stid);
2528 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2530 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2533 struct cpl_tid_release *req;
2535 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2536 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2537 INIT_TP_WR(req, tid);
2538 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2542 * Queue a TID release request and if necessary schedule a work queue to process it.
2545 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2548 void **p = &t->tid_tab[tid];
2549 struct adapter *adap = container_of(t, struct adapter, tids);
2551 spin_lock_bh(&adap->tid_release_lock);
2552 *p = adap->tid_release_head;
2553 /* Low 2 bits encode the Tx channel number */
2554 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2555 if (!adap->tid_release_task_busy) {
2556 adap->tid_release_task_busy = true;
2557 queue_work(workq, &adap->tid_release_task);
2559 spin_unlock_bh(&adap->tid_release_lock);
2563 * Process the list of pending TID release requests.
2565 static void process_tid_release_list(struct work_struct *work)
2567 struct sk_buff *skb;
2568 struct adapter *adap;
2570 adap = container_of(work, struct adapter, tid_release_task);
2572 spin_lock_bh(&adap->tid_release_lock);
2573 while (adap->tid_release_head) {
2574 void **p = adap->tid_release_head;
2575 unsigned int chan = (uintptr_t)p & 3;
2576 p = (void *)p - chan;
2578 adap->tid_release_head = *p;
2580 spin_unlock_bh(&adap->tid_release_lock);
2582 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2584 schedule_timeout_uninterruptible(1);
2586 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2587 t4_ofld_send(adap, skb);
2588 spin_lock_bh(&adap->tid_release_lock);
2590 adap->tid_release_task_busy = false;
2591 spin_unlock_bh(&adap->tid_release_lock);
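/*
 * Illustrative userspace sketch (not the driver code) of the deferred
 * release list built by cxgb4_queue_tid_release() and drained by
 * process_tid_release_list(): each pending element is the address of its
 * tid_tab[] slot with the Tx channel stored in the low two bits, and the
 * slot itself holds the next tagged pointer.  Walking the list recovers the
 * TID from the pointer difference and the channel from the tag.  Names and
 * sizes are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define NTIDS 8

static void *tid_tab[NTIDS];
static void *release_head;

static void queue_release(unsigned int tid, unsigned int chan)
{
	void **p = &tid_tab[tid];

	*p = release_head;				/* link in old head   */
	release_head = (void *)((uintptr_t)p | chan);	/* tag with channel   */
}

int main(void)
{
	queue_release(3, 1);
	queue_release(5, 2);

	while (release_head) {
		unsigned int chan = (uintptr_t)release_head & 3;
		void **p = (void **)((uintptr_t)release_head - chan);

		printf("release tid %td on chan %u\n", p - tid_tab, chan);
		release_head = *p;
	}
	return 0;
}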
2595 * Release a TID and inform HW. If we are unable to allocate the release
2596 * message we defer to a work queue.
2598 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2601 struct sk_buff *skb;
2602 struct adapter *adap = container_of(t, struct adapter, tids);
2604 old = t->tid_tab[tid];
2605 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2607 t->tid_tab[tid] = NULL;
2608 mk_tid_release(skb, chan, tid);
2609 t4_ofld_send(adap, skb);
2611 cxgb4_queue_tid_release(t, chan, tid);
2613 atomic_dec(&t->tids_in_use);
2615 EXPORT_SYMBOL(cxgb4_remove_tid);
2618 * Allocate and initialize the TID tables. Returns 0 on success.
2620 static int tid_init(struct tid_info *t)
2623 unsigned int stid_bmap_size;
2624 unsigned int natids = t->natids;
2626 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
2627 size = t->ntids * sizeof(*t->tid_tab) +
2628 natids * sizeof(*t->atid_tab) +
2629 t->nstids * sizeof(*t->stid_tab) +
2630 t->nsftids * sizeof(*t->stid_tab) +
2631 stid_bmap_size * sizeof(long) +
2632 t->nftids * sizeof(*t->ftid_tab) +
2633 t->nsftids * sizeof(*t->ftid_tab);
2635 t->tid_tab = t4_alloc_mem(size);
2639 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2640 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2641 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
2642 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
2643 spin_lock_init(&t->stid_lock);
2644 spin_lock_init(&t->atid_lock);
2646 t->stids_in_use = 0;
2648 t->atids_in_use = 0;
2649 atomic_set(&t->tids_in_use, 0);
2651 /* Set up the free list for atid_tab and clear the stid bitmap. */
2654 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2655 t->afree = t->atid_tab;
2657 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
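/*
 * Illustrative userspace sketch of how tid_init() carves one allocation
 * into the TID, ATID, STID, STID-bitmap and filter tables.  The element
 * types and counts below are stand-ins; the point is the ordering and the
 * offset arithmetic that lets a single free() release everything.
 */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct aopen { void *data; };	/* stand-in for union aopen_entry   */
struct serv  { void *data; };	/* stand-in for struct serv_entry   */
struct filt  { void *data; };	/* stand-in for struct filter_entry */

int main(void)
{
	unsigned int ntids = 1024, natids = 128, nstids = 64,
		     nsftids = 16, nftids = 256;
	unsigned int bmap_longs = BITS_TO_LONGS(nstids + nsftids);
	size_t size = ntids * sizeof(void *) +
		      natids * sizeof(struct aopen) +
		      (nstids + nsftids) * sizeof(struct serv) +
		      bmap_longs * sizeof(long) +
		      (nftids + nsftids) * sizeof(struct filt);
	void **tid_tab = calloc(1, size);
	struct aopen *atid_tab = (struct aopen *)&tid_tab[ntids];
	struct serv *stid_tab = (struct serv *)&atid_tab[natids];
	unsigned long *stid_bmap =
		(unsigned long *)&stid_tab[nstids + nsftids];
	struct filt *ftid_tab = (struct filt *)&stid_bmap[bmap_longs];

	printf("one block of %zu bytes, ftid_tab at offset %td\n",
	       size, (char *)ftid_tab - (char *)tid_tab);
	free(tid_tab);
	return 0;
}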
2662 * cxgb4_create_server - create an IP server
2664 * @stid: the server TID
2665 * @sip: local IP address to bind server to
2666 * @sport: the server's TCP port
2667 * @queue: queue to direct messages from this server to
2669 * Create an IP server for the given port and address.
2670 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2672 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2673 __be32 sip, __be16 sport, __be16 vlan,
2677 struct sk_buff *skb;
2678 struct adapter *adap;
2679 struct cpl_pass_open_req *req;
2681 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2685 adap = netdev2adap(dev);
2686 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2688 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2689 req->local_port = sport;
2690 req->peer_port = htons(0);
2691 req->local_ip = sip;
2692 req->peer_ip = htonl(0);
2693 chan = rxq_to_chan(&adap->sge, queue);
2694 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2695 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2696 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2697 return t4_mgmt_tx(adap, skb);
2699 EXPORT_SYMBOL(cxgb4_create_server);
2702 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2703 * @mtus: the HW MTU table
2704 * @mtu: the target MTU
2705 * @idx: index of selected entry in the MTU table
2707 * Returns the index and the value in the HW MTU table that is closest to
2708 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2709 * table, in which case that smallest available value is selected.
2711 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2716 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2722 EXPORT_SYMBOL(cxgb4_best_mtu);
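/*
 * Illustrative userspace sketch of the cxgb4_best_mtu() selection rule: the
 * HW MTU table is sorted ascending, and the scan stops at the largest entry
 * that does not exceed the target, falling back to the first entry when the
 * target is smaller than everything.  The table contents are made up.
 */
#include <stdio.h>

#define NMTUS 16

static unsigned int best_mtu(const unsigned short *mtus, unsigned short mtu,
			     unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int idx;

	/* 1460-byte target: 1280 is the largest entry not exceeding it */
	printf("best MTU %u at index %u\n", best_mtu(mtus, 1460, &idx), idx);
	return 0;
}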
2725 * cxgb4_port_chan - get the HW channel of a port
2726 * @dev: the net device for the port
2728 * Return the HW Tx channel of the given port.
2730 unsigned int cxgb4_port_chan(const struct net_device *dev)
2732 return netdev2pinfo(dev)->tx_chan;
2734 EXPORT_SYMBOL(cxgb4_port_chan);
2736 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2738 struct adapter *adap = netdev2adap(dev);
2741 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2742 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2744 EXPORT_SYMBOL(cxgb4_dbfifo_count);
2747 * cxgb4_port_viid - get the VI id of a port
2748 * @dev: the net device for the port
2750 * Return the VI id of the given port.
2752 unsigned int cxgb4_port_viid(const struct net_device *dev)
2754 return netdev2pinfo(dev)->viid;
2756 EXPORT_SYMBOL(cxgb4_port_viid);
2759 * cxgb4_port_idx - get the index of a port
2760 * @dev: the net device for the port
2762 * Return the index of the given port.
2764 unsigned int cxgb4_port_idx(const struct net_device *dev)
2766 return netdev2pinfo(dev)->port_id;
2768 EXPORT_SYMBOL(cxgb4_port_idx);
2770 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2771 struct tp_tcp_stats *v6)
2773 struct adapter *adap = pci_get_drvdata(pdev);
2775 spin_lock(&adap->stats_lock);
2776 t4_tp_get_tcp_stats(adap, v4, v6);
2777 spin_unlock(&adap->stats_lock);
2779 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2781 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2782 const unsigned int *pgsz_order)
2784 struct adapter *adap = netdev2adap(dev);
2786 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2787 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2788 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2789 HPZ3(pgsz_order[3]));
2791 EXPORT_SYMBOL(cxgb4_iscsi_init);
2793 int cxgb4_flush_eq_cache(struct net_device *dev)
2795 struct adapter *adap = netdev2adap(dev);
2798 ret = t4_fwaddrspace_write(adap, adap->mbox,
2799 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
2802 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2804 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2806 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
2810 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2812 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2813 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
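/*
 * Illustrative userspace sketch (glibc endian helpers) of the field
 * extraction in read_eq_indices(): the 64-bit EQ context word arrives
 * big-endian, and the consumer and producer indices are pulled from bit
 * offsets 25 and 9 respectively.  The sample value is made up.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t host = ((uint64_t)0x1234 << 25) | ((uint64_t)0x0567 << 9);
	uint64_t wire = htobe64(host);		/* as read from the adapter */
	uint16_t cidx = (be64toh(wire) >> 25) & 0xffff;
	uint16_t pidx = (be64toh(wire) >> 9) & 0xffff;

	printf("cidx 0x%x pidx 0x%x\n", cidx, pidx);
	return 0;
}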
2818 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2821 struct adapter *adap = netdev2adap(dev);
2822 u16 hw_pidx, hw_cidx;
2825 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2829 if (pidx != hw_pidx) {
2832 if (pidx >= hw_pidx)
2833 delta = pidx - hw_pidx;
2835 delta = size - hw_pidx + pidx;
2837 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2838 QID(qid) | PIDX(delta));
2843 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
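/*
 * Illustrative userspace sketch of the producer-index resync done by
 * cxgb4_sync_txq_pidx() and sync_txq_pidx(): the doorbell register takes an
 * increment, so the driver computes how far the software pidx has advanced
 * past the hardware pidx, accounting for wrap at the queue size.  The
 * values below are made up.
 */
#include <stdio.h>

static unsigned int pidx_delta(unsigned int sw_pidx, unsigned int hw_pidx,
			       unsigned int qsize)
{
	return sw_pidx >= hw_pidx ? sw_pidx - hw_pidx
				  : qsize - hw_pidx + sw_pidx;
}

int main(void)
{
	/* software index wrapped past the end of a 512-entry queue */
	printf("delta = %u\n", pidx_delta(3, 500, 512));	/* prints 15 */
	return 0;
}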
2845 static struct pci_driver cxgb4_driver;
2847 static void check_neigh_update(struct neighbour *neigh)
2849 const struct device *parent;
2850 const struct net_device *netdev = neigh->dev;
2852 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2853 netdev = vlan_dev_real_dev(netdev);
2854 parent = netdev->dev.parent;
2855 if (parent && parent->driver == &cxgb4_driver.driver)
2856 t4_l2t_update(dev_get_drvdata(parent), neigh);
2859 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2863 case NETEVENT_NEIGH_UPDATE:
2864 check_neigh_update(data);
2866 case NETEVENT_REDIRECT:
2873 static bool netevent_registered;
2874 static struct notifier_block cxgb4_netevent_nb = {
2875 .notifier_call = netevent_cb
2878 static void drain_db_fifo(struct adapter *adap, int usecs)
2883 set_current_state(TASK_UNINTERRUPTIBLE);
2884 schedule_timeout(usecs_to_jiffies(usecs));
2885 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2886 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2891 static void disable_txq_db(struct sge_txq *q)
2893 spin_lock_irq(&q->db_lock);
2895 spin_unlock_irq(&q->db_lock);
2898 static void enable_txq_db(struct sge_txq *q)
2900 spin_lock_irq(&q->db_lock);
2902 spin_unlock_irq(&q->db_lock);
2905 static void disable_dbs(struct adapter *adap)
2909 for_each_ethrxq(&adap->sge, i)
2910 disable_txq_db(&adap->sge.ethtxq[i].q);
2911 for_each_ofldrxq(&adap->sge, i)
2912 disable_txq_db(&adap->sge.ofldtxq[i].q);
2913 for_each_port(adap, i)
2914 disable_txq_db(&adap->sge.ctrlq[i].q);
2917 static void enable_dbs(struct adapter *adap)
2921 for_each_ethrxq(&adap->sge, i)
2922 enable_txq_db(&adap->sge.ethtxq[i].q);
2923 for_each_ofldrxq(&adap->sge, i)
2924 enable_txq_db(&adap->sge.ofldtxq[i].q);
2925 for_each_port(adap, i)
2926 enable_txq_db(&adap->sge.ctrlq[i].q);
2929 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2931 u16 hw_pidx, hw_cidx;
2934 spin_lock_bh(&q->db_lock);
2935 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2938 if (q->db_pidx != hw_pidx) {
2941 if (q->db_pidx >= hw_pidx)
2942 delta = q->db_pidx - hw_pidx;
2944 delta = q->size - hw_pidx + q->db_pidx;
2946 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2947 QID(q->cntxt_id) | PIDX(delta));
2951 spin_unlock_bh(&q->db_lock);
2953 CH_WARN(adap, "DB drop recovery failed.\n");
2955 static void recover_all_queues(struct adapter *adap)
2959 for_each_ethrxq(&adap->sge, i)
2960 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2961 for_each_ofldrxq(&adap->sge, i)
2962 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2963 for_each_port(adap, i)
2964 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2967 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2969 mutex_lock(&uld_mutex);
2970 if (adap->uld_handle[CXGB4_ULD_RDMA])
2971 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2973 mutex_unlock(&uld_mutex);
2976 static void process_db_full(struct work_struct *work)
2978 struct adapter *adap;
2980 adap = container_of(work, struct adapter, db_full_task);
2982 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2983 drain_db_fifo(adap, dbfifo_drain_delay);
2984 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2985 DBFIFO_HP_INT | DBFIFO_LP_INT,
2986 DBFIFO_HP_INT | DBFIFO_LP_INT);
2987 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2990 static void process_db_drop(struct work_struct *work)
2992 struct adapter *adap;
2994 adap = container_of(work, struct adapter, db_drop_task);
2996 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
2998 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2999 drain_db_fifo(adap, 1);
3000 recover_all_queues(adap);
3004 void t4_db_full(struct adapter *adap)
3006 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3007 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3008 queue_work(workq, &adap->db_full_task);
3011 void t4_db_dropped(struct adapter *adap)
3013 queue_work(workq, &adap->db_drop_task);
3016 static void uld_attach(struct adapter *adap, unsigned int uld)
3019 struct cxgb4_lld_info lli;
3022 lli.pdev = adap->pdev;
3023 lli.l2t = adap->l2t;
3024 lli.tids = &adap->tids;
3025 lli.ports = adap->port;
3026 lli.vr = &adap->vres;
3027 lli.mtus = adap->params.mtus;
3028 if (uld == CXGB4_ULD_RDMA) {
3029 lli.rxq_ids = adap->sge.rdma_rxq;
3030 lli.nrxq = adap->sge.rdmaqs;
3031 } else if (uld == CXGB4_ULD_ISCSI) {
3032 lli.rxq_ids = adap->sge.ofld_rxq;
3033 lli.nrxq = adap->sge.ofldqsets;
3035 lli.ntxq = adap->sge.ofldqsets;
3036 lli.nchan = adap->params.nports;
3037 lli.nports = adap->params.nports;
3038 lli.wr_cred = adap->params.ofldq_wr_cred;
3039 lli.adapter_type = adap->params.rev;
3040 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3041 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3042 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3044 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3045 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3047 lli.filt_mode = adap->filter_mode;
3048 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3049 for (i = 0; i < NCHAN; i++)
3051 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3052 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3053 lli.fw_vers = adap->params.fw_vers;
3054 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3055 lli.sge_pktshift = adap->sge.pktshift;
3056 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3058 handle = ulds[uld].add(&lli);
3059 if (IS_ERR(handle)) {
3060 dev_warn(adap->pdev_dev,
3061 "could not attach to the %s driver, error %ld\n",
3062 uld_str[uld], PTR_ERR(handle));
3066 adap->uld_handle[uld] = handle;
3068 if (!netevent_registered) {
3069 register_netevent_notifier(&cxgb4_netevent_nb);
3070 netevent_registered = true;
3073 if (adap->flags & FULL_INIT_DONE)
3074 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3077 static void attach_ulds(struct adapter *adap)
3081 mutex_lock(&uld_mutex);
3082 list_add_tail(&adap->list_node, &adapter_list);
3083 for (i = 0; i < CXGB4_ULD_MAX; i++)
3085 uld_attach(adap, i);
3086 mutex_unlock(&uld_mutex);
3089 static void detach_ulds(struct adapter *adap)
3093 mutex_lock(&uld_mutex);
3094 list_del(&adap->list_node);
3095 for (i = 0; i < CXGB4_ULD_MAX; i++)
3096 if (adap->uld_handle[i]) {
3097 ulds[i].state_change(adap->uld_handle[i],
3098 CXGB4_STATE_DETACH);
3099 adap->uld_handle[i] = NULL;
3101 if (netevent_registered && list_empty(&adapter_list)) {