TULIP NETWORK DRIVERS
M: Grant Grundler <grundler@parisc-linux.org>
-M: Kyle McMartin <kyle@mcmartin.ca>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/tulip/
err = -ENOMEM;
goto out;
}
- atm_dev = atm_dev_register(DEV_LABEL, &adummy_ops, -1, NULL);
+ atm_dev = atm_dev_register(DEV_LABEL, NULL, &adummy_ops, -1, NULL);
if (!atm_dev) {
printk(KERN_ERR DEV_LABEL ": atm_dev_register() failed\n");
err = -ENODEV;
goto out_reset;
}
- dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL);
+ dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1,
+ NULL);
if (!dev->atm_dev) {
PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
err = -EINVAL;
if (!dev_data)
return -ENOMEM;
- dev = atm_dev_register(DEV_LABEL,&atmtcp_v_dev_ops,itf,NULL);
+ dev = atm_dev_register(DEV_LABEL,NULL,&atmtcp_v_dev_ops,itf,NULL);
if (!dev) {
kfree(dev_data);
return itf == -1 ? -ENOMEM : -EBUSY;
&zeroes);
if (!cpu_zeroes) goto out1;
}
- dev = atm_dev_register(DEV_LABEL,&ops,-1,NULL);
+ dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
if (!dev) goto out2;
pci_set_drvdata(pci_dev, dev);
eni_dev->pci_dev = pci_dev;
fs_dev, sizeof (struct fs_dev));
if (!fs_dev)
goto err_out;
- atm_dev = atm_dev_register("fs", &ops, -1, NULL);
+ atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL);
if (!atm_dev)
goto err_out_free_fs_dev;
static int __devinit
-fore200e_register(struct fore200e* fore200e)
+fore200e_register(struct fore200e* fore200e, struct device *parent)
{
struct atm_dev* atm_dev;
DPRINTK(2, "device %s being registered\n", fore200e->name);
- atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
- NULL);
+ atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
+ -1, NULL);
if (atm_dev == NULL) {
printk(FORE200E "unable to register device %s\n", fore200e->name);
return -ENODEV;
static int __devinit
-fore200e_init(struct fore200e* fore200e)
+fore200e_init(struct fore200e* fore200e, struct device *parent)
{
- if (fore200e_register(fore200e) < 0)
+ if (fore200e_register(fore200e, parent) < 0)
return -ENODEV;
if (fore200e->bus->configure(fore200e) < 0)
sprintf(fore200e->name, "%s-%d", bus->model_name, index);
- err = fore200e_init(fore200e);
+ err = fore200e_init(fore200e, &op->dev);
if (err < 0) {
fore200e_shutdown(fore200e);
kfree(fore200e);
sprintf(fore200e->name, "%s-%d", bus->model_name, index);
- err = fore200e_init(fore200e);
+ err = fore200e_init(fore200e, &pci_dev->dev);
if (err < 0) {
fore200e_shutdown(fore200e);
goto out_free;
goto init_one_failure;
}
- atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
+ atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
if (!atm_dev) {
err = -ENODEV;
goto init_one_failure;
PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
iobase, irq, membase);
- dev->atm_dev = atm_dev_register(DEV_LABEL, &hrz_ops, -1, NULL);
+ dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1,
+ NULL);
if (!(dev->atm_dev)) {
PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
err = -EINVAL;
goto err_out_iounmap;
}
- dev = atm_dev_register("idt77252", &idt77252_ops, -1, NULL);
+ dev = atm_dev_register("idt77252", &pcidev->dev, &idt77252_ops, -1,
+ NULL);
if (!dev) {
printk("%s: can't register atm device\n", card->name);
err = -EIO;
ret = -ENODEV;
goto err_out_free_iadev;
}
- dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
+ dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
if (!dev) {
ret = -ENOMEM;
goto err_out_disable_dev;
return -ENOMEM;
}
- atmdev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
+ atmdev = atm_dev_register(DEV_LABEL, &pci->dev, &ops, -1, NULL);
if (atmdev == NULL) {
printk(KERN_ERR DEV_LABEL
": couldn't register atm device!\n");
}
/* Register device */
- card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
+ card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
+ -1, NULL);
if (card->atmdev == NULL) {
printk("nicstar%d: can't register device.\n", i);
error = 17;
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
static int list_vccs(int vci);
static void release_vccs(struct atm_dev *dev);
-static int atm_init(struct solos_card *);
+static int atm_init(struct solos_card *, struct device *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
static void solos_bh(unsigned long);
if (db_firmware_upgrade)
flash_upgrade(card, 3);
- err = atm_init(card);
+ err = atm_init(card, &dev->dev);
if (err)
goto out_free_irq;
return err;
}
-static int atm_init(struct solos_card *card)
+static int atm_init(struct solos_card *card, struct device *parent)
{
int i;
skb_queue_head_init(&card->tx_queue[i]);
skb_queue_head_init(&card->cli_queue[i]);
- card->atmdev[i] = atm_dev_register("solos-pci", &fpga_ops, -1, NULL);
+ card->atmdev[i] = atm_dev_register("solos-pci", parent, &fpga_ops, -1, NULL);
if (!card->atmdev[i]) {
dev_err(&card->dev->dev, "Could not register ATM device %d\n", i);
atm_remove(card);
goto out;
}
- dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
+ dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
if (!dev)
goto out_free;
static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 */
{ USB_DEVICE(0x0CF3, 0x3000) },
+
+ /* Atheros AR3011 with sflash firmware */
+ { USB_DEVICE(0x0CF3, 0x3002) },
+
{ } /* Terminating entry */
};
/* Broadcom BCM2033 without firmware */
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p failed to resubmit (%d)",
+ if (err != -EPERM)
+ BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
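/* Note: usb_submit_urb() returns -EPERM once the URB has been killed
 * (e.g. by usb_kill_urb() during disconnect), so that resubmit failure
 * is expected and not worth logging; the same check is repeated in the
 * other two completion handlers below. */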
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p failed to resubmit (%d)",
+ if (err != -EPERM)
+ BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p failed to resubmit (%d)",
+ if (err != -EPERM)
+ BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);
static struct cn_dev cdev;
__b44_set_flow_ctrl(bp, pause_enab);
}
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
- const char *str;
+ char buf[20];
u32 val;
int err;
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- str = nvram_get("boardnum");
- if (!str)
+ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
- if (simple_strtoul(str, NULL, 0) == 2) {
+ if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
if (err)
goto error;
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+ memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
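/* note: the post-increment advances to the next slot; without it every
 * multicast address overwrote req->mac[0] */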
} else {
req->promiscuous = 1;
}
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-4"
-#define DRV_MODULE_RELDATE "2010/11/01"
+#define DRV_MODULE_VERSION "1.60.01-0"
+#define DRV_MODULE_RELDATE "2010/11/12"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
- ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
if ((xmit_type & XMIT_GSO_V6) &&
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
* @return header len
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+ *parsing_data |= ((tcp_hdrlen(skb)/4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
- skb->data) / 2) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+ *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
- hlen = bnx2x_set_pbd_csum_e2(bp,
- skb, pbd_e2, xmit_type);
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
} else {
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
if (CHIP_IS_E2(bp))
- bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
else
bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
/****************************************************************************
* SRC initializations
****************************************************************************/
-
+#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
-
+#endif
#endif /* BNX2X_INIT_OPS_H */
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-cpumask_var_t netpoll_block_tx;
+atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
static const char * const version =
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->slave_cnt == 0)
+ if (is_zero_ether_addr(bond->dev->dev_addr))
memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
slave_dev->addr_len);
if (res)
goto out;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
- res = -ENOMEM;
- goto out;
- }
-#endif
-
res = register_pernet_subsys(&bond_net_ops);
if (res)
goto out;
rtnl_link_unregister(&bond_link_ops);
err_link:
unregister_pernet_subsys(&bond_net_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
-#endif
goto out;
}
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
+ /*
+ * Make sure we don't have an imbalance on our netpoll blocking
+ */
+ WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern cpumask_var_t netpoll_block_tx;
+extern atomic_t netpoll_block_tx;
static inline void block_netpoll_tx(void)
{
- preempt_disable();
- BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
- netpoll_block_tx));
+ atomic_inc(&netpoll_block_tx);
}
static inline void unblock_netpoll_tx(void)
{
- BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(),
- netpoll_block_tx));
- preempt_enable();
+ atomic_dec(&netpoll_block_tx);
}
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
- return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx);
+ return atomic_read(&netpoll_block_tx);
return 0;
}
#else
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/version.h>
#include <linux/init.h>
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
if (index < NEXACT_MAC)
ret++;
else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ *hash |= (1ULL << hash_mac_addr(addr[i]));
}
return ret;
}
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
+ size_t iqe_size;
/*
* We should not be called till we know how many Queue Sets we can
}
s->ethqsets = qidx;
+ /*
+ * The Ingress Queue Entry Size for our various Response Queues needs
+ * to be big enough to accommodate the largest message we can receive
+ * from the chip/firmware; which is 64 bytes ...
+ */
+ iqe_size = 64;
+
/*
* Set up default Queue Set parameters ... Start off with the
* shortest interrupt holdoff timer.
struct sge_eth_rxq *rxq = &s->ethrxq[qs];
struct sge_eth_txq *txq = &s->ethtxq[qs];
- init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+ init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
rxq->fl.size = 72;
txq->q.size = 1024;
}
* The firmware event queue is used for link state changes and
* notifications of TX DMA completions.
*/
- init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
- L1_CACHE_BYTES);
+ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
/*
* The forwarded interrupt queue is used when we're in MSI interrupt
* any time ...
*/
init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
- L1_CACHE_BYTES);
+ iqe_size);
}
/*
}
+static int ehea_set_flags(struct net_device *dev, u32 data)
+{
+ return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
+ | ETH_FLAG_TXVLAN
+ | ETH_FLAG_RXVLAN);
+}
+
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_ethtool_stats = ehea_get_ethtool_stats,
.get_rx_csum = ehea_get_rx_csum,
.set_settings = ehea_set_settings,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ehea_set_flags,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
pr->port->vgrp);
- if (use_lro) {
+ if (skb->dev->features & NETIF_F_LRO) {
if (vlan_extracted)
lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
pr->port->vgrp,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (use_lro)
+ if (dev->features & NETIF_F_LRO)
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
| NETIF_F_LLTX;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+ if (use_lro)
+ dev->features |= NETIF_F_LRO;
+
INIT_WORK(&port->reset_task, ehea_reset_port);
ret = register_netdev(dev);
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- enic_isr_msix_rq(enic->msix_entry[intr].vector, enic);
+ enic_isr_msix_rq(enic->msix_entry[intr].vector,
+ &enic->napi[i]);
}
intr = enic_msix_wq_intr(enic, i);
enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
rcu_read_unlock();
dev_kfree_skb(skb);
stats->tx_dropped++;
+ if (skb_queue_len(&dp->tq) != 0)
+ goto resched;
break;
}
rcu_read_unlock();
adapter->rx_ring[i] = NULL;
}
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
- Currently supports the IP175C PHY.
+ Currently supports the IP175C and IP1001 PHYs.
config REALTEK_PHY
tristate "Drivers for Realtek PHYs"
#include <asm/irq.h>
#include <asm/uaccess.h>
-MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
+MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
return 0;
}
+static int ip1001_config_init(struct phy_device *phydev)
+{
+ int err, value;
+
+ /* Software Reset PHY */
+ value = phy_read(phydev, MII_BMCR);
+ value |= BMCR_RESET;
+ err = phy_write(phydev, MII_BMCR, value);
+ if (err < 0)
+ return err;
+
+ do {
+ value = phy_read(phydev, MII_BMCR);
+ } while (value & BMCR_RESET);
+
+ /* Additional delay (2ns) used to adjust RX clock phase
+ * at GMII/RGMII interface */
+ value = phy_read(phydev, 16);
+ value |= 0x3;
+
+ err = phy_write(phydev, 16, value);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
static int ip175c_read_status(struct phy_device *phydev)
{
if (phydev->addr == 4) /* WAN port */
.driver = { .owner = THIS_MODULE,},
};
-static int __init ip175c_init(void)
+static struct phy_driver ip1001_driver = {
+ .phy_id = 0x02430d90,
+ .name = "ICPlus IP1001",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause,
+ .config_init = &ip1001_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init icplus_init(void)
{
+ int ret = 0;
+
+ ret = phy_driver_register(&ip1001_driver);
+ if (ret < 0)
+ return -ENODEV;
+
return phy_driver_register(&ip175c_driver);
}
-static void __exit ip175c_exit(void)
+static void __exit icplus_exit(void)
{
+ phy_driver_unregister(&ip1001_driver);
phy_driver_unregister(&ip175c_driver);
}
-module_init(ip175c_init);
-module_exit(ip175c_exit);
+module_init(icplus_init);
+module_exit(icplus_exit);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
+ { 0x02430d90, 0x0ffffff0 },
{ }
};
abort:
kfree_skb(skb);
- return 0;
+ return 1;
}
/************************************************************************
u32 mailbox_in;
u32 mailbox_out;
struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
+ mutex_init(&qdev->mpi_mutex);
if (!cards_found) {
dev_info(&pdev->dev, "%s\n", DRV_STRING);
int status;
unsigned long count;
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
end:
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
return status;
}
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
int status;
- rtnl_lock();
status = ql_mb_set_port_cfg(qdev);
- rtnl_unlock();
if (status)
return status;
status = ql_idc_wait(qdev);
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
int status;
- rtnl_lock();
status = ql_mb_get_port_cfg(qdev);
- rtnl_unlock();
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to get port config data.\n");
u32 aen;
int timeout;
- rtnl_lock();
aen = mbcp->mbox_out[1] >> 16;
timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
}
break;
}
- rtnl_unlock();
}
void ql_mpi_work(struct work_struct *work)
struct mbox_params *mbcp = &mbc;
int err = 0;
- rtnl_lock();
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- rtnl_unlock();
+ mutex_unlock(&qdev->mpi_mutex);
ql_enable_completion_interrupt(qdev, 0);
}
mdio_write(ioaddr, MII_BMCR, val & 0xffff);
}
-static void rtl8169_check_link_status(struct net_device *dev,
+static void __rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ void __iomem *ioaddr,
+ bool pm)
{
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
/* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(&tp->pci_dev->dev);
+ if (pm)
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ if (pm)
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ __rtl8169_check_link_status(dev, tp, ioaddr, false);
+}
+
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
}
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, ioaddr);
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
/* We need to see the latest version of tp->intr_mask to
* avoid ignoring an MSI interrupt and having to wait for
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
- return 0;
-
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
- return -EBUSY;
+ return tp->TxDescArray ? -EBUSY : 0;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
if (channel->irq)
synchronize_irq(channel->irq);
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
*channel = *old_channel;
+ channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
rx_queue = &channel->rx_queue;
if (rc)
goto rollback;
+ efx_init_napi(efx);
+
/* Destroy old channels */
- for (i = 0; i < efx->n_channels; i++)
+ for (i = 0; i < efx->n_channels; i++) {
+ efx_fini_napi_channel(other_channel[i]);
efx_remove_channel(other_channel[i]);
+ }
out:
/* Free unused channel structures */
for (i = 0; i < efx->n_channels; i++)
efx_start_channel(channel);
}
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
/* Switch to event based MCDI completions after enabling interrupts.
/* Disable interrupts and wait for ISR to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
*
**************************************************************************/
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
- return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
- channel->napi_dev = NULL;
- }
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
}
/**************************************************************************
if (rc)
goto fail1;
- rc = efx_init_napi(efx);
- if (rc)
- goto fail2;
+ efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
- fail2:
efx_remove_all(efx);
fail1:
return rc;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
struct pci_dev *pci_dev;
const struct efx_nic_type *type;
int legacy_irq;
+ bool legacy_irq_enabled;
struct workqueue_struct *workqueue;
char workqueue_name[16];
struct work_struct reset_work;
u32 queues;
int syserr;
+ /* Could this be ours? If interrupts are disabled then the
+ * channel state may not be valid.
+ */
+ if (!efx->legacy_irq_enabled)
+ return result;
+
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, ®, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
pr_warning("\tno valid MAC address;"
"please, use ifconfig or nwhwconfig!\n");
+ spin_lock_init(&priv->lock);
+
ret = register_netdev(dev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
(dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
- spin_lock_init(&priv->lock);
-
return ret;
}
DMFE_DBUG(0, "dmfe_start_xmit", 0);
- /* Resource flag check */
- netif_stop_queue(dev);
-
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
return NETDEV_TX_OK;
}
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
spin_lock_irqsave(&db->lock, flags);
/* No Tx resource check, it never happens normally */
/* Packet is complete. Inject into stack. */
/* We have IP packet here */
odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
- /* don't check it */
- odev->skb_rx_buf->ip_summed =
- CHECKSUM_UNNECESSARY;
-
skb_reset_mac_header(odev->skb_rx_buf);
/* Ship it off to the kernel */
struct net_device *dev = port->netdev;
card_t* card = port->card;
u8 stat;
+ unsigned count = 0;
spin_lock(&port->lock);
dev->stats.tx_bytes += readw(&desc->len);
}
writeb(0, &desc->stat); /* Free descriptor */
+ count++;
port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
}
- netif_wake_queue(dev);
+ if (count)
+ netif_wake_queue(dev);
spin_unlock(&port->lock);
}
sc->bmisscount = 0;
}
- if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
+ if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
/* NB: hw still stops DMA, so proceed */
}
- /* refresh the beacon for AP mode */
- if (sc->opmode == NL80211_IFTYPE_AP)
+ /* refresh the beacon for AP or MESH mode */
+ if (sc->opmode == NL80211_IFTYPE_AP ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_beacon_update(sc->hw, vif);
ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
/* Assign the vap/adhoc to a beacon xmit slot. */
if ((avf->opmode == NL80211_IFTYPE_AP) ||
- (avf->opmode == NL80211_IFTYPE_ADHOC)) {
+ (avf->opmode == NL80211_IFTYPE_ADHOC) ||
+ (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
int slot;
WARN_ON(list_empty(&sc->bcbuf));
sc->bslot[avf->bslot] = vif;
if (avf->opmode == NL80211_IFTYPE_AP)
sc->num_ap_vifs++;
- else
+ else if (avf->opmode == NL80211_IFTYPE_ADHOC)
sc->num_adhoc_vifs++;
}
#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
+#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
+
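/* Worked example: CTL(60, 1) == 60 | (1 << 6) == 0x7c, i.e. the 6-bit
 * tPower (60) in bits 0-5 and the 2-bit flag in bits 6-7; the
 * CTL_EDGE_TPOWER()/CTL_EDGE_FLAGS() helpers introduced further down
 * unpack the two fields again. */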
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
}
},
.ctlPowerData_2G = {
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
},
.modalHeader5G = {
/* 4 idle,t1,t2,b (4 bits per setting) */
.ctlPowerData_5G = {
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 0}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 0}, {60, 1}, {60, 1}, {60, 0},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
- {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 0}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
}
},
}
struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
if (is2GHz)
- return ctl_2g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
else
- return ctl_5g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
}
static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
if (is2GHz) {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
- ctl_2g[idx].ctlEdges[edge - 1].flag)
- return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
} else {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
- ctl_5g[idx].ctlEdges[edge - 1].flag)
- return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
}
return AR9300_MAX_RATE_POWER;
u8 tPow2x[14];
} __packed;
-struct cal_ctl_edge_pwr {
- u8 tPower:6,
- flag:2;
-} __packed;
-
struct cal_ctl_data_2g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
} __packed;
struct cal_ctl_data_5g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
struct ar9300_eeprom {
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/completion.h>
+#include <linux/pm_qos_params.h>
#include "debug.h"
#include "common.h"
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
void ath_draintxq(struct ath_softc *sc,
struct ath_txq *txq, bool retry_tx);
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
+
+ struct pm_qos_request_list pm_qos_req;
};
struct ath_wiphy {
}
extern struct ieee80211_ops ath9k_ops;
-extern struct pm_qos_request_list ath9k_pm_qos_req;
extern int modparam_nohwcrypt;
extern int led_blink;
for (i = 0; (i < num_band_edges) &&
(pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
- twiceMaxEdgePower = pRdEdgesPower[i].tPower;
+ twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
break;
} else if ((i > 0) &&
(freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
is2GHz))) {
if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
is2GHz) < freq &&
- pRdEdgesPower[i - 1].flag) {
+ CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
twiceMaxEdgePower =
- pRdEdgesPower[i - 1].tPower;
+ CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
}
break;
}
#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
+#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
+#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
+
+#define LNA_CTL_BUF_MODE BIT(0)
+#define LNA_CTL_ISEL_LO BIT(1)
+#define LNA_CTL_ISEL_HI BIT(2)
+#define LNA_CTL_BUF_IN BIT(3)
+#define LNA_CTL_FEM_BAND BIT(4)
+#define LNA_CTL_LOCAL_BIAS BIT(5)
+#define LNA_CTL_FORCE_XPA BIT(6)
+#define LNA_CTL_USE_ANT1 BIT(7)
+
enum eeprom_param {
EEP_NFTHRESH_5,
EEP_NFTHRESH_2,
u8 xatten2Margin[AR5416_MAX_CHAINS];
u8 ob_ch1;
u8 db_ch1;
- u8 useAnt1:1,
- force_xpaon:1,
- local_bias:1,
- femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
+ u8 lna_ctl;
u8 miscBits;
u16 xpaBiasLvlFreq[3];
u8 futureModal[6];
u8 tPow2x[8];
} __packed;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-struct cal_ctl_edges {
- u8 bChannel;
- u8 flag:2, tPower:6;
-} __packed;
-#else
struct cal_ctl_edges {
u8 bChannel;
- u8 tPower:6, flag:2;
+ u8 ctl;
} __packed;
-#endif
struct cal_data_op_loop_ar9287 {
u8 pwrPdg[2][5];
ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
AR_AN_TOP2_LOCALBIAS,
AR_AN_TOP2_LOCALBIAS_S,
- pModal->local_bias);
+ !!(pModal->lna_ctl &
+ LNA_CTL_LOCAL_BIAS));
REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
- pModal->force_xpaon);
+ !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA));
}
REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
- scaledPower = max((u16)0, scaledPower);
-
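/* Note: scaledPower is an unsigned u16, so the old unconditional
 * subtraction could wrap below zero, and the max() that followed could
 * never undo the wrap; hence the explicit range checks above. */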
if (IS_CHAN_2GHZ(chan)) {
numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
SUB_NUM_CTL_MODES_AT_2G_40;
num_ant_config = 1;
- if (pBase->version >= 0x0E0D)
- if (pModal->useAnt1)
- num_ant_config += 1;
+ if (pBase->version >= 0x0E0D &&
+ (pModal->lna_ctl & LNA_CTL_USE_ANT1))
+ num_ant_config += 1;
return num_ant_config;
}
struct hif_device_usb *hif_dev =
(struct hif_device_usb *) usb_get_intfdata(interface);
+ /*
+ * The device has to be set to FULLSLEEP mode in case no
+ * interface is up.
+ */
+ if (!(hif_dev->flags & HIF_USB_START))
+ ath9k_htc_suspend(hif_dev->htc_handle);
+
ath9k_hif_usb_dealloc_urbs(hif_dev);
return 0;
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
void ath9k_ps_work(struct work_struct *work);
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode);
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);
u16 devid, char *product);
void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
#ifdef CONFIG_PM
+void ath9k_htc_suspend(struct htc_target *htc_handle);
int ath9k_htc_resume(struct htc_target *htc_handle);
#endif
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
}
#ifdef CONFIG_PM
+
+void ath9k_htc_suspend(struct htc_target *htc_handle)
+{
+ ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP);
+}
+
int ath9k_htc_resume(struct htc_target *htc_handle)
{
int ret;
return mode;
}
-static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
- enum ath9k_power_mode mode)
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
{
bool ret;
val = REG_READ(ah, AR7010_GPIO_IN);
return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
} else if (AR_SREV_9300_20_OR_LATER(ah))
- return MS_REG_READ(AR9300, gpio) != 0;
+ return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
+ AR_GPIO_BIT(gpio)) != 0;
else if (AR_SREV_9271(ah))
return MS_REG_READ(AR9271, gpio) != 0;
else if (AR_SREV_9287_11_OR_LATER(ah))
*/
#include <linux/slab.h>
-#include <linux/pm_qos_params.h>
#include "ath9k.h"
.write = ath9k_iowrite32,
};
-struct pm_qos_request_list ath9k_pm_qos_req;
-
/**************************/
/* Initialization */
/**************************/
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_STATION) |
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
- pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
return 0;
}
ieee80211_unregister_hw(hw);
- pm_qos_remove_request(&ath9k_pm_qos_req);
+ pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
- else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
- rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
else if (ads.ds_rxstatus8 & AR_KeyMiss)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
*/
#include <linux/nl80211.h>
-#include <linux/pm_qos_params.h>
#include "ath9k.h"
#include "btcoex.h"
* the relevant bits of the h/w.
*/
ath9k_hw_set_interrupts(ah, 0);
- ath_drain_all_txq(sc, false);
+ stopped = ath_drain_all_txq(sc, false);
spin_lock_bh(&sc->rx.pcu_lock);
- stopped = ath_stoprecv(sc);
+ if (!ath_stoprecv(sc))
+ stopped = false;
/* XXX: do not flush receive queue here. We don't want
* to flush data frames already in queue because of
ath9k_btcoex_timer_resume(sc);
}
- pm_qos_update_request(&ath9k_pm_qos_req, 55);
+ pm_qos_update_request(&sc->pm_qos_req, 55);
mutex_unlock:
mutex_unlock(&sc->mutex);
sc->sc_flags |= SC_OP_INVALID;
- pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
mutex_unlock(&sc->mutex);
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
- int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ /* Disable SWBA interrupt */
+ sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
ath9k_ps_restore(sc);
+ tasklet_kill(&sc->bcon_tasklet);
}
ath_beacon_return(sc, avp);
sc->sc_flags &= ~SC_OP_BEACONS;
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == vif) {
- printk(KERN_DEBUG "%s: vif had allocated beacon "
- "slot\n", __func__);
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
+ if (sc->nbcnvifs) {
+ /* Re-enable SWBA interrupt */
+ sc->sc_ah->imask |= ATH9K_INT_SWBA;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
+ ath9k_ps_restore(sc);
}
sc->nvifs--;
struct ath_rx_status *rx_stats,
bool *decrypt_error)
{
+#define is_mc_or_valid_tkip_keyix ((is_mc || \
+ (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
+
struct ath_hw *ah = common->ah;
__le16 fc;
u8 rx_status_len = ah->caps.rx_status_len;
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
*decrypt_error = true;
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
+ bool is_mc;
/*
* The MIC error bit is only valid if the frame
* is not a control frame or fragment, and it was
* decrypted using a valid TKIP key.
*/
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+
if (!ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap))
+ is_mc_or_valid_tkip_keyix)
rxs->flag |= RX_FLAG_MMIC_ERROR;
else
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
#define AR9287_GPIO_IN_VAL_S 11
#define AR9271_GPIO_IN_VAL 0xFFFF0000
#define AR9271_GPIO_IN_VAL_S 16
-#define AR9300_GPIO_IN_VAL 0x0001FFFF
-#define AR9300_GPIO_IN_VAL_S 0
#define AR7010_GPIO_IN_VAL 0x0000FFFF
#define AR7010_GPIO_IN_VAL_S 0
+#define AR_GPIO_IN 0x404c
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
+
#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
}
}
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i, npend = 0;
if (sc->sc_flags & SC_OP_INVALID)
- return;
+ return true;
/* Stop beacon queue */
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
}
}
- if (npend) {
- int r;
-
- ath_print(common, ATH_DBG_FATAL,
- "Failed to stop TX DMA. Resetting hardware!\n");
-
- spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
- if (r)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d\n",
- r);
- spin_unlock_bh(&sc->sc_resetlock);
- }
+ if (npend)
+ ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n");
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i))
ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
}
+
+ return !npend;
}
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
if (SUPP(CARL9170FW_WLANTX_CAB)) {
ar->hw->wiphy->interface_modes |=
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO);
}
}
* supports these modes. The code which will add the
* additional interface_modes is in fw.c.
*/
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
AR9170_TX_MAC_BACKOFF);
- mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &&
+ mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
AR9170_TX_MAC_QOS);
no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
lbs_deb_sdio("call remove card\n");
lbs_stop_card(card->priv);
lbs_remove_card(card->priv);
- card->priv->surpriseremoved = 1;
flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
lbs_stop_card(priv);
lbs_remove_card(priv); /* will call free_netdev */
- priv->surpriseremoved = 1;
free_irq(spi->irq, card);
if_spi_terminate_spi_thread(card);
if (card->pdata->teardown)
lbs_free_adapter(priv);
lbs_cfg_free(priv);
-
- priv->dev = NULL;
free_netdev(dev);
lbs_deb_leave(LBS_DEB_MAIN);
orinoco_add_hostscan_results(priv, buf, len);
kfree(buf);
- } else if (priv->scan_request) {
+ } else {
/* Either abort or complete the scan */
- cfg80211_scan_done(priv->scan_request, (len < 0));
- priv->scan_request = NULL;
+ orinoco_scan_done(priv, (len < 0));
}
spin_lock_irqsave(&priv->scan_lock, flags);
hermes_write_regn(hw, EVACK, 0xffff);
}
+ orinoco_scan_done(priv, true);
+
/* firmware will have to reassociate */
netif_carrier_off(dev);
priv->last_linkstatus = 0xffff;
orinoco_unlock(priv, &flags);
/* Scanning support: Notify scan cancellation */
- if (priv->scan_request) {
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- }
+ orinoco_scan_done(priv, true);
if (priv->hard_reset) {
err = (*priv->hard_reset)(priv);
struct net_device *dev = priv->ndev;
int err = 0;
+ /* If we've called commit, we are reconfiguring or bringing the
+ * interface up. Maintaining countermeasures across this would
+ * be confusing, so note that we've disabled them. The port will
+ * be enabled later in orinoco_commit or __orinoco_up. */
+ priv->tkip_cm_active = 0;
+
err = orinoco_hw_program_rids(priv);
/* FIXME: what about netif_tx_lock */
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
priv->scan_request = NULL;
}
}
+
+void orinoco_scan_done(struct orinoco_private *priv, bool abort)
+{
+ if (priv->scan_request) {
+ cfg80211_scan_done(priv->scan_request, abort);
+ priv->scan_request = NULL;
+ }
+}
void orinoco_add_hostscan_results(struct orinoco_private *dev,
unsigned char *buf,
size_t len);
+void orinoco_scan_done(struct orinoco_private *priv, bool abort);
#endif /* _ORINOCO_SCAN_H_ */
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
hw->eeprom_pda = true;
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
*/
if (param->value) {
priv->tkip_cm_active = 1;
- ret = hermes_enable_port(hw, 0);
+ ret = hermes_disable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
- ret = hermes_disable_port(hw, 0);
+ ret = hermes_enable_port(hw, 0);
}
break;
* condition: callbacks we register can be executed at once, before we have
* initialized the struct atm_dev. To protect against this, all callbacks
* abort if atm_dev->dev_data is NULL. */
- atm_dev = atm_dev_register(instance->driver_name, &usbatm_atm_devops, -1, NULL);
+ atm_dev = atm_dev_register(instance->driver_name,
+ &instance->usb_intf->dev, &usbatm_atm_devops,
+ -1, NULL);
if (!atm_dev) {
usb_err(instance, "%s: failed to register ATM device!\n", __func__);
return -1;
/* temp init ATM device, set to 128kbit */
atm_dev->link_rate = 128 * 1000 / 424;
- ret = sysfs_create_link(&atm_dev->class_dev.kobj,
- &instance->usb_intf->dev.kobj, "device");
- if (ret) {
- atm_err(instance, "%s: sysfs_create_link failed: %d\n",
- __func__, ret);
- goto fail_sysfs;
- }
-
if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) {
atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret);
goto fail;
return 0;
fail:
- sysfs_remove_link(&atm_dev->class_dev.kobj, "device");
- fail_sysfs:
instance->atm_dev = NULL;
atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */
return ret;
/* ATM finalize */
if (instance->atm_dev) {
- sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
atm_dev_deregister(instance->atm_dev);
instance->atm_dev = NULL;
}
int r;
if (!write_length)
return 0;
+ write_length += write_address % VHOST_PAGE_SIZE;
write_address /= VHOST_PAGE_SIZE;
for (;;) {
u64 base = (u64)(unsigned long)log_base;
if (write_length <= VHOST_PAGE_SIZE)
break;
write_length -= VHOST_PAGE_SIZE;
- write_address += VHOST_PAGE_SIZE;
+ write_address += 1;
}
return r;
}
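/* Worked example: with VHOST_PAGE_SIZE 4096, a 2-byte write at address
 * 4095 spans two pages.  Folding the in-page offset into write_length
 * before dividing makes the loop mark both page 0 and page 1 dirty,
 * where the old arithmetic marked only page 0. */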
#define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb))
-struct atm_dev *atm_dev_register(const char *type,const struct atmdev_ops *ops,
- int number,unsigned long *flags); /* number == -1: pick first available */
+struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+ const struct atmdev_ops *ops,
+ int number, /* -1 == pick first available */
+ unsigned long *flags);
struct atm_dev *atm_dev_lookup(int number);
void atm_dev_deregister(struct atm_dev *dev);
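/*
 * Caller sketch (hypothetical driver, illustration only -- "example_ops"
 * is an assumed struct atmdev_ops): PCI drivers now pass their struct
 * device as the parent, so the ATM class device is parented correctly
 * in sysfs instead of relying on a hand-rolled "device" symlink.
 */
static int example_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *id)
{
	struct atm_dev *atm_dev;

	atm_dev = atm_dev_register("example", &pci_dev->dev, &example_ops,
				   -1, NULL);
	if (!atm_dev)
		return -ENODEV;

	pci_set_drvdata(pci_dev, atm_dev);
	return 0;
}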
LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
LINUX_MIB_TCPDEFERACCEPTDROP,
LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
+ LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
__LINUX_MIB_MAX
};
/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
+extern void sk_filter_release_rcu(struct rcu_head *rcu);
+
/**
* sk_filter_release - release a socket filter
* @fp: filter to remove
static inline void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
- kfree(fp);
+ call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
}
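/* Note: packet paths run the filter under rcu_read_lock_bh(), so the
 * last reference must be released through call_rcu_bh() rather than an
 * immediate kfree(); sk_filter_release_rcu() performs the kfree() once
 * the grace period has elapsed. */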
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
.dev_uevent = atm_uevent,
};
-int atm_register_sysfs(struct atm_dev *adev)
+int atm_register_sysfs(struct atm_dev *adev, struct device *parent)
{
struct device *cdev = &adev->class_dev;
int i, j, err;
cdev->class = &atm_class;
+ cdev->parent = parent;
dev_set_drvdata(cdev, adev);
dev_set_name(cdev, "%s%d", adev->type, adev->number);
}
EXPORT_SYMBOL(atm_dev_lookup);
-struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
- int number, unsigned long *flags)
+struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+ const struct atmdev_ops *ops, int number,
+ unsigned long *flags)
{
struct atm_dev *dev, *inuse;
goto out_fail;
}
- if (atm_register_sysfs(dev) < 0) {
+ if (atm_register_sysfs(dev, parent) < 0) {
pr_err("atm_register_sysfs failed for dev %s\n", type);
atm_proc_dev_deregister(dev);
goto out_fail;
#endif /* CONFIG_PROC_FS */
-int atm_register_sysfs(struct atm_dev *adev);
+int atm_register_sysfs(struct atm_dev *adev, struct device *parent);
void atm_unregister_sysfs(struct atm_dev *adev);
#endif
int lm = 0;
if (type != SCO_LINK && type != ESCO_LINK)
- return 0;
+ return -EINVAL;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return 0;
+ return -EINVAL;
if (!status) {
struct sco_conn *conn;
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return 0;
+ return -EINVAL;
sco_conn_del(hcon, bt_err(reason));
EXPORT_SYMBOL(sk_chk_filter);
/**
- * sk_filter_rcu_release - Release a socket filter by rcu_head
+ * sk_filter_release_rcu - Release a socket filter by rcu_head
* @rcu: rcu_head that contains the sk_filter to free
*/
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
{
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
- sk_filter_release(fp);
-}
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
- unsigned int size = sk_filter_len(fp);
-
- atomic_sub(size, &sk->sk_omem_alloc);
- call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+ kfree(fp);
}
+EXPORT_SYMBOL(sk_filter_release_rcu);
/**
* sk_attach_filter - attach a socket filter
rcu_assign_pointer(sk->sk_filter, fp);
if (old_fp)
- sk_filter_delayed_uncharge(sk, old_fp);
+ sk_filter_uncharge(sk, old_fp);
return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
sock_owned_by_user(sk));
if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL);
- sk_filter_delayed_uncharge(sk, filter);
+ sk_filter_uncharge(sk, filter);
ret = 0;
}
return ret;
struct phy_device *phydev;
unsigned int type;
- skb_push(skb, ETH_HLEN);
+ if (skb_headroom(skb) < ETH_HLEN)
+ return false;
+ __skb_push(skb, ETH_HLEN);
type = classify(skb);
- skb_pull(skb, ETH_HLEN);
+ __skb_pull(skb, ETH_HLEN);
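/* Note: plain skb_push() BUG()s on headroom underflow; with the
 * headroom verified above, the non-checking __skb_push()/__skb_pull()
 * pair is safe here. */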
switch (type) {
case PTP_CLASS_V1_IPV4:
err = 0;
switch (cmd) {
case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
edev = dev->ec_ptr;
if (edev == NULL) {
{
struct iphdr *ip = ip_hdr(skb);
unsigned char stn = ntohl(ip->saddr) & 0xff;
+ struct dst_entry *dst = skb_dst(skb);
+ struct ec_device *edev = NULL;
struct sock *sk = NULL;
struct sk_buff *newskb;
- struct ec_device *edev = skb->dev->ec_ptr;
+
+ if (dst)
+ edev = dst->dev->ec_ptr;
if (! edev)
goto bad;
SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
+ SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
SNMP_MIB_SENTINEL
};
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
- LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
}
tcp_update_metrics(sk);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
- if (init_rcv_wnd &&
- (*rcv_wnd > init_rcv_wnd * mss))
- *rcv_wnd = init_rcv_wnd * mss;
- else if (*rcv_wnd > init_cwnd * mss)
- *rcv_wnd = init_cwnd * mss;
+ if (init_rcv_wnd)
+ *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+ else
+ *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
}
/* Set the clamp no higher than max representable value */
*/
static u8 tcp_cookie_size_check(u8 desired)
{
- if (desired > 0) {
+ int cookie_size;
+
+ if (desired > 0)
/* previously specified */
return desired;
- }
- if (sysctl_tcp_cookie_size <= 0) {
+
+ cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
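/* the sysctl is read once so a concurrent writer cannot make the range
 * checks below disagree with the value finally returned */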
+ if (cookie_size <= 0)
/* no default specified */
return 0;
- }
- if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
+
+ if (cookie_size <= TCP_COOKIE_MIN)
/* value too small, specify minimum */
return TCP_COOKIE_MIN;
- }
- if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
+
+ if (cookie_size >= TCP_COOKIE_MAX)
/* value too large, specify maximum */
return TCP_COOKIE_MAX;
- }
- if (0x1 & sysctl_tcp_cookie_size) {
+
+ if (cookie_size & 1)
/* 8-bit multiple, illegal, fix it */
- return (u8)(sysctl_tcp_cookie_size + 0x1);
- }
- return (u8)sysctl_tcp_cookie_size;
+ cookie_size++;
+
+ return (u8)cookie_size;
}
/* Write previously computed TCP options to the packet.
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
+ int win_divisor;
if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
goto send_now;
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
goto send_now;
- if (sysctl_tcp_tso_win_divisor) {
+ win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+ if (win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
/* If at least some fraction of a window is available,
* just use it.
*/
- chunk /= sysctl_tcp_tso_win_divisor;
+ chunk /= win_divisor;
if (limit >= chunk)
goto send_now;
} else {
kfree_skb(skb);
goto errout;
}
- rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
+ rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
}
static inline size_t inet6_prefix_nlmsg_size(void)
sizeof (struct ipv6hdr);
dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ dev->mtu -= 8;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
static void ip6_tnl_dev_setup(struct net_device *dev)
{
+ struct ip6_tnl *t;
+
dev->netdev_ops = &ip6_tnl_netdev_ops;
dev->destructor = ip6_dev_free;
dev->type = ARPHRD_TUNNEL6;
dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
+ t = netdev_priv(dev);
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ dev->mtu -= 8;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_NETNS_LOCAL;
return 0;
}
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+ /* no tunnel matched, let upstream know, ipsec may handle it */
rcu_read_unlock();
+ return 1;
out:
kfree_skb(skb);
return 0;
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
goto out;
rc = -ENODEV;
rtnl_lock();
+ rcu_read_lock();
if (sk->sk_bound_dev_if) {
- llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+ llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
if (llc->dev) {
if (!addr->sllc_arphrd)
addr->sllc_arphrd = llc->dev->type;
!llc_mac_match(addr->sllc_mac,
llc->dev->dev_addr)) {
rc = -EINVAL;
- dev_put(llc->dev);
llc->dev = NULL;
}
}
} else
llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
addr->sllc_mac);
+ rcu_read_unlock();
rtnl_unlock();
if (!llc->dev)
goto out;
break;
case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ if (is_multicast_ether_addr(mgmt->da) &&
+ !is_broadcast_ether_addr(mgmt->da))
+ return RX_DROP_MONITOR;
+
/* process only for station */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return RX_DROP_MONITOR;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
return;
+ goto out;
}
}
return;
}
+ out:
dev_kfree_skb(skb);
}
list) {
if (!ieee80211_sdata_running(tmp_sdata))
continue;
- if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
+ if (tmp_sdata->vif.type ==
+ NL80211_IFTYPE_MONITOR ||
+ tmp_sdata->vif.type ==
+ NL80211_IFTYPE_AP_VLAN ||
+ tmp_sdata->vif.type ==
+ NL80211_IFTYPE_WDS)
continue;
if (compare_ether_addr(tmp_sdata->vif.addr,
hdr->addr2) == 0) {
int nh_pos, h_pos;
struct sta_info *sta = NULL;
u32 sta_flags = 0;
+ struct sk_buff *tmp_skb;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
goto fail;
}
- nh_pos = skb_network_header(skb) - skb->data;
- h_pos = skb_transport_header(skb) - skb->data;
-
/* convert Ethernet header to proper 802.11 header (based on
* operation mode) */
ethertype = (skb->data[12] << 8) | skb->data[13];
goto fail;
}
+ /*
+ * If the skb is shared we need to obtain our own copy.
+ */
+ if (skb_shared(skb)) {
+ tmp_skb = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ kfree_skb(tmp_skb);
+
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto fail;
+ }
+ }
+
hdr.frame_control = fc;
hdr.duration_id = 0;
hdr.seq_ctrl = 0;
encaps_len = 0;
}
+ nh_pos = skb_network_header(skb) - skb->data;
+ h_pos = skb_transport_header(skb) - skb->data;
+
skb_pull(skb, skip_header_bytes);
nh_pos -= skip_header_bytes;
h_pos -= skip_header_bytes;
struct sctp_association *asoc = NULL;
struct sctp_setpeerprim prim;
struct sctp_chunk *chunk;
+ struct sctp_af *af;
int err;
sp = sctp_sk(sk);
if (!sctp_state(asoc, ESTABLISHED))
return -ENOTCONN;
+ af = sctp_get_af_specific(prim.sspp_addr.ss_family);
+ if (!af)
+ return -EINVAL;
+
+ if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
+ return -EADDRNOTAVAIL;
+
if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
return -EADDRNOTAVAIL;
return ret;
}
+/**
+ * kernel_recvmsg - Receive a message from a socket (kernel space)
+ * @sock: The socket to receive the message from
+ * @msg: Received message
+ * @vec: Input s/g array for message data
+ * @num: Size of input s/g array
+ * @size: Number of bytes to read
+ * @flags: Message flags (MSG_DONTWAIT, etc...)
+ *
+ * On return the msg structure contains the scatter/gather array passed in the
+ * vec argument. The array is modified so that it consists of the unfilled
+ * portion of the original array.
+ *
+ * The returned value is the total number of bytes received, or an error.
+ */
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size, int flags)
{
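/*
 * Usage sketch (hypothetical caller, illustration only): receive up to
 * len bytes from a kernel-space socket into buf, per the kerneldoc above.
 */
static int example_recv(struct socket *sock, void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	/* returns the number of bytes received, or a negative errno */
	return kernel_recvmsg(sock, &msg, &vec, 1, len, MSG_DONTWAIT);
}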
list_for_each_safe(entry, tmp, &x25_neigh_list) {
nb = list_entry(entry, struct x25_neigh, node);
__x25_remove_neigh(nb);
+ dev_put(nb->dev);
}
write_unlock_bh(&x25_neigh_list_lock);
}
return xc;
error:
- kfree(xc);
+ xfrm_state_put(xc);
return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);