Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 24 Aug 2009 19:25:03 +0000 (12:25 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 24 Aug 2009 19:25:03 +0000 (12:25 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  smc91x: let smc91x work well under netpoll
  pxaficp-ir: remove incorrect net_device_ops
  NET: llc, zero sockaddr_llc struct
  drivers/net: fixed drivers that support netpoll use ndo_start_xmit()
  netpoll: warning for ndo_start_xmit returns with interrupts enabled
  net: Fix Micrel KSZ8842 Kconfig description
  netfilter: xt_quota: fix wrong return value (error case)
  ipv6: Fix commit 63d9950b08184e6531adceb65f64b429909cc101 (ipv6: Make v4-mapped bindings consistent with IPv4)
  E100: fix interaction with swiotlb on X86.
  pkt_sched: Convert CBQ to tasklet_hrtimer.
  pkt_sched: Convert qdisc_watchdog to tasklet_hrtimer
  rtl8187: always set MSR_LINK_ENEDCA flag with RTL8187B
  ibm_newemac: emac_close() needs to call netif_carrier_off()
  net: fix ks8851 build errors
  net: Rename MAC platform driver for w90p910 platform
  yellowfin: Fix buffer underrun after dev_alloc_skb() failure
  orinoco: correct key bounds check in orinoco_hw_get_tkip_iv
  mac80211: fix todo lock

21 files changed:
drivers/net/Kconfig
drivers/net/arm/w90p910_ether.c
drivers/net/e100.c
drivers/net/fec_mpc52xx.c
drivers/net/ibm_newemac/core.c
drivers/net/irda/pxaficp_ir.c
drivers/net/ixp2000/ixpdev.c
drivers/net/macb.c
drivers/net/mlx4/en_tx.c
drivers/net/smc91x.c
drivers/net/wireless/orinoco/hw.c
drivers/net/wireless/rtl818x/rtl8187_dev.c
drivers/net/yellowfin.c
include/net/pkt_sched.h
net/core/netpoll.c
net/ipv6/af_inet6.c
net/llc/af_llc.c
net/mac80211/key.c
net/netfilter/xt_quota.c
net/sched/sch_api.c
net/sched/sch_cbq.c

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5f6509a5f640820fd89b6ef7b8b420a1ab0c8e39..5ce7cbabd7a786cb6297617f765064672a68a918 100644 (file)
@@ -1727,12 +1727,14 @@ config KS8842
        tristate "Micrel KSZ8842"
        depends on HAS_IOMEM
        help
-         This platform driver is for Micrel KSZ8842 chip.
+         This platform driver is for Micrel KSZ8842 / KS8842
+         2-port ethernet switch chip (managed, VLAN, QoS).
 
 config KS8851
        tristate "Micrel KS8851 SPI"
        depends on SPI
        select MII
+       select CRC32
        help
          SPI driver for Micrel KS8851 SPI attached network chip.
 
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 616fb7985a3481e45dd2cd7e875ab24323ac13d8..ddd231cb54b796f3e15a6b04374d027e5fb5a713 100644 (file)
@@ -1080,7 +1080,7 @@ static struct platform_driver w90p910_ether_driver = {
        .probe          = w90p910_ether_probe,
        .remove         = __devexit_p(w90p910_ether_remove),
        .driver         = {
-               .name   = "w90p910-emc",
+               .name   = "nuc900-emc",
                .owner  = THIS_MODULE,
        },
 };
@@ -1101,5 +1101,5 @@ module_exit(w90p910_ether_exit);
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 41b648a67fec34d071a5ee681b713e4cada6ee87..3a6735dc9f6a6158bf04196c22c144133de55408 100644 (file)
@@ -1899,7 +1899,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
                                nic->ru_running = RU_SUSPENDED;
                pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
                                               sizeof(struct rfd),
-                                              PCI_DMA_BIDIRECTIONAL);
+                                              PCI_DMA_FROMDEVICE);
                return -ENODATA;
        }
 
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc786333d95c87cd84d34a1b189c43f9b60367c1..c40113f58963f9f765268141db1d78f367125a8c 100644 (file)
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct bcom_fec_bd *bd;
+       unsigned long flags;
 
        if (bcom_queue_full(priv->tx_dmatsk)) {
                if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock_irq(&priv->lock);
+       spin_lock_irqsave(&priv->lock, flags);
        dev->trans_start = jiffies;
 
        bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
        }
 
-       spin_unlock_irq(&priv->lock);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
        return NETDEV_TX_OK;
 }
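
Several changes in this pull (fec_mpc52xx above, plus ixpdev, macb, mlx4 and smc91x below) apply the same rule: netpoll can call ndo_start_xmit() with hard interrupts already disabled, so the transmit path has to preserve the caller's IRQ state with the irqsave/irqrestore lock variants instead of unconditionally re-enabling interrupts on unlock. A minimal sketch of that pattern, with a hypothetical foo driver standing in for the real ones:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    /* Hypothetical driver private data; only the lock matters here. */
    struct foo_priv {
            spinlock_t lock;
    };

    static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);
            unsigned long flags;

            /*
             * netpoll may enter here with IRQs already off, so save the
             * current state instead of forcing IRQs back on at unlock.
             */
            spin_lock_irqsave(&priv->lock, flags);
            /* ... hand the skb to the hardware ... */
            spin_unlock_irqrestore(&priv->lock, flags);

            return NETDEV_TX_OK;
    }
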
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index beb84213b6719ba614641e56af5862dd5581ad9e..f0f890803710bed42418201bf2bb4fa58f25cb99 100644 (file)
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev)
 
        free_irq(dev->emac_irq, dev);
 
+       netif_carrier_off(ndev);
+
        return 0;
 }
 
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 3376a4f39e0a1eb02b6287a9355159c3aca7fc0f..77d10edefd2579cd06c7e53f065bdf3dda7a96e7 100644 (file)
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = {
        .ndo_stop               = pxa_irda_stop,
        .ndo_start_xmit         = pxa_irda_hard_xmit,
        .ndo_do_ioctl           = pxa_irda_ioctl,
-       .ndo_change_mtu         = eth_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
 };
 
 static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
        if (!dev)
                goto err_mem_3;
 
+       SET_NETDEV_DEV(dev, &pdev->dev);
        si = netdev_priv(dev);
        si->dev = &pdev->dev;
        si->pdata = pdev->dev.platform_data;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 2a0174b62e964d613102e0ef14e32828b3b91c34..92fb8235c76689c2ea3b6f1c079a65536bd035a1 100644 (file)
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ixpdev_priv *ip = netdev_priv(dev);
        struct ixpdev_tx_desc *desc;
        int entry;
+       unsigned long flags;
 
        if (unlikely(skb->len > PAGE_SIZE)) {
                /* @@@ Count drops.  */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 
        dev->trans_start = jiffies;
 
-       local_irq_disable();
+       local_irq_save(flags);
        ip->tx_queue_entries++;
        if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
                netif_stop_queue(dev);
-       local_irq_enable();
+       local_irq_restore(flags);
 
        return 0;
 }
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 5b5c25368d1e559e9f2dbbb71013f13efa293ce7..e3601cf3f9315e8462ec530cd25866ed5a78adbc 100644 (file)
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_addr_t mapping;
        unsigned int len, entry;
        u32 ctrl;
+       unsigned long flags;
 
 #ifdef DEBUG
        int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
        len = skb->len;
-       spin_lock_irq(&bp->lock);
+       spin_lock_irqsave(&bp->lock, flags);
 
        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(bp) < 1) {
                netif_stop_queue(dev);
-               spin_unlock_irq(&bp->lock);
+               spin_unlock_irqrestore(&bp->lock, flags);
                dev_err(&bp->pdev->dev,
                        "BUG! Tx Ring full when queue awake!\n");
                dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);
 
-       spin_unlock_irq(&bp->lock);
+       spin_unlock_irqrestore(&bp->lock, flags);
 
        dev->trans_start = jiffies;
 
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5a88b3f576931f33063eeb2749dcb70e8eff70d4..62208401c4df36d307a83eab67d8df7cfc8f5d84 100644 (file)
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 {
        struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+       unsigned long flags;
 
        /* If we don't have a pending timer, set one up to catch our recent
           post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
        /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
        if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-               if (spin_trylock_irq(&ring->comp_lock)) {
+               if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
                        mlx4_en_process_tx_cq(priv->dev, cq);
-                       spin_unlock_irq(&ring->comp_lock);
+                       spin_unlock_irqrestore(&ring->comp_lock, flags);
                }
 }
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1c70e999cc50d866ab5ebdba0956b25bba83659d..9da1fa12a67cb4ba05ce18e79843133c4f49e125 100644 (file)
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {                                     \
        unsigned char mask;                                             \
-       spin_lock_irq(&lp->lock);                                       \
+       unsigned long smc_enable_flags;                                 \
+       spin_lock_irqsave(&lp->lock, smc_enable_flags);                 \
        mask = SMC_GET_INT_MASK(lp);                                    \
        mask |= (x);                                                    \
        SMC_SET_INT_MASK(lp, mask);                                     \
-       spin_unlock_irq(&lp->lock);                                     \
+       spin_unlock_irqrestore(&lp->lock, smc_enable_flags);            \
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {                                    \
        unsigned char mask;                                             \
-       spin_lock_irq(&lp->lock);                                       \
+       unsigned long smc_disable_flags;                                \
+       spin_lock_irqsave(&lp->lock, smc_disable_flags);                \
        mask = SMC_GET_INT_MASK(lp);                                    \
        mask &= ~(x);                                                   \
        SMC_SET_INT_MASK(lp, mask);                                     \
-       spin_unlock_irq(&lp->lock);                                     \
+       spin_unlock_irqrestore(&lp->lock, smc_disable_flags);           \
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void  smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)                                      \
+#define smc_special_trylock(lock, flags)                               \
 ({                                                                     \
        int __ret;                                                      \
-       local_irq_disable();                                            \
+       local_irq_save(flags);                                          \
        __ret = spin_trylock(lock);                                     \
        if (!__ret)                                                     \
-               local_irq_enable();                                     \
+               local_irq_restore(flags);                               \
        __ret;                                                          \
 })
-#define smc_special_lock(lock)         spin_lock_irq(lock)
-#define smc_special_unlock(lock)       spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)          spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags)        spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)      (1)
-#define smc_special_lock(lock)         do { } while (0)
-#define smc_special_unlock(lock)       do { } while (0)
+#define smc_special_trylock(lock, flags)       (1)
+#define smc_special_lock(lock, flags)          do { } while (0)
+#define smc_special_unlock(lock, flags)        do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
        struct sk_buff *skb;
        unsigned int packet_no, len;
        unsigned char *buf;
+       unsigned long flags;
 
        DBG(3, "%s: %s\n", dev->name, __func__);
 
-       if (!smc_special_trylock(&lp->lock)) {
+       if (!smc_special_trylock(&lp->lock, flags)) {
                netif_stop_queue(dev);
                tasklet_schedule(&lp->tx_task);
                return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
        skb = lp->pending_tx_skb;
        if (unlikely(!skb)) {
-               smc_special_unlock(&lp->lock);
+               smc_special_unlock(&lp->lock, flags);
                return;
        }
        lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
                printk("%s: Memory allocation failed.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_fifo_errors++;
-               smc_special_unlock(&lp->lock);
+               smc_special_unlock(&lp->lock, flags);
                goto done;
        }
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
        /* queue the packet for TX */
        SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-       smc_special_unlock(&lp->lock);
+       smc_special_unlock(&lp->lock, flags);
 
        dev->trans_start = jiffies;
        dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct smc_local *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
        unsigned int numPages, poll_count, status;
+       unsigned long flags;
 
        DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return 0;
        }
 
-       smc_special_lock(&lp->lock);
+       smc_special_lock(&lp->lock, flags);
 
        /* now, try to allocate the memory */
        SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        } while (--poll_count);
 
-       smc_special_unlock(&lp->lock);
+       smc_special_unlock(&lp->lock, flags);
 
        lp->pending_tx_skb = skb;
        if (!poll_count) {
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 632fac86a3085ee33713a1fb2f718e50527da053..b3946272c72e75ed4c3fcfebe644c8404a46d19e 100644 (file)
@@ -70,7 +70,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
        int err = 0;
        u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
 
-       if ((key < 0) || (key > 4))
+       if ((key < 0) || (key >= 4))
                return -EINVAL;
 
        err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 294250e294dd548bb295c620e6d52c799f03021f..87a95588a8e3c69793ee6c437a3ba242427eed07 100644 (file)
@@ -869,6 +869,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
        priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
        rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
 
+       /* ENEDCA flag must always be set, transmit issues? */
+       rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+
        return 0;
 }
 
@@ -1173,13 +1176,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
                        rtl818x_iowrite8(priv, &priv->map->BSSID[i],
                                         info->bssid[i]);
 
+               if (priv->is_rtl8187b)
+                       reg = RTL818X_MSR_ENEDCA;
+               else
+                       reg = 0;
+
                if (is_valid_ether_addr(info->bssid)) {
-                       reg = RTL818X_MSR_INFRA;
-                       if (priv->is_rtl8187b)
-                               reg |= RTL818X_MSR_ENEDCA;
+                       reg |= RTL818X_MSR_INFRA;
                        rtl818x_iowrite8(priv, &priv->map->MSR, reg);
                } else {
-                       reg = RTL818X_MSR_NO_LINK;
+                       reg |= RTL818X_MSR_NO_LINK;
                        rtl818x_iowrite8(priv, &priv->map->MSR, reg);
                }
 
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index a07580138e8109c10b958498dad13ca1f200ad1c..c2fd6187773f1df97fab29504ca8dbca85a54193 100644 (file)
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(unsigned long data);
 static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 static int yellowfin_rx(struct net_device *dev);
@@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev)
 {
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;
-       int i;
+       int i, ret;
 
        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);
 
-       i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-       if (i) return i;
+       ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+       if (ret)
+               return ret;
 
        if (yellowfin_debug > 1)
                printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
                           dev->name, dev->irq);
 
-       yellowfin_init_ring(dev);
+       ret = yellowfin_init_ring(dev);
+       if (ret) {
+               free_irq(dev->irq, dev);
+               return ret;
+       }
 
        iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
        iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -725,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
 }
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
 {
        struct yellowfin_private *yp = netdev_priv(dev);
-       int i;
+       int i, j;
 
        yp->tx_full = 0;
        yp->cur_rx = yp->cur_tx = 0;
@@ -753,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev)
                yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                        skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
        }
+       if (i != RX_RING_SIZE) {
+               for (j = 0; j < i; j++)
+                       dev_kfree_skb(yp->rx_skbuff[j]);
+               return -ENOMEM;
+       }
        yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -769,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev)
        yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 #else
 {
-       int j;
-
        /* Tx ring needs a pair of descriptors, the second for the status. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                j = 2*i;
@@ -805,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev)
 }
 #endif
        yp->tx_tail_desc = &yp->tx_status[0];
-       return;
+       return 0;
 }
 
 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
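
The yellowfin change above also illustrates a common unwind pattern: if dev_alloc_skb() fails part-way through populating the Rx ring, everything allocated so far is freed and -ENOMEM is propagated so open() can back out cleanly (including releasing the IRQ it already requested). A rough, self-contained sketch of that pattern, with placeholder names rather than the yellowfin structures:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Fill 'ring' with freshly allocated skbs, or undo and fail cleanly. */
    static int foo_fill_rx_ring(struct sk_buff **ring, int ring_size, int buf_sz)
    {
            int i, j;

            for (i = 0; i < ring_size; i++) {
                    struct sk_buff *skb = dev_alloc_skb(buf_sz);

                    if (!skb)
                            break;
                    ring[i] = skb;
            }
            if (i != ring_size) {
                    /* Partial failure: release what we did allocate. */
                    for (j = 0; j < i; j++)
                            dev_kfree_skb(ring[j]);
                    return -ENOMEM;
            }
            return 0;
    }
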
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 82a3191375f5c380f49ce42b13d9364822293959..7eafb8d54470bbcf7633dc3c5780fc0d173ff673 100644 (file)
@@ -61,8 +61,8 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
-       struct hrtimer  timer;
-       struct Qdisc    *qdisc;
+       struct tasklet_hrtimer  timer;
+       struct Qdisc            *qdisc;
 };
 
 extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index df30feb2fc725b047ef744273e7c506714bbac61..1b76eb11deb4a2632a9daca75a414a2906123db4 100644 (file)
@@ -319,6 +319,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
                        udelay(USEC_PER_POLL);
                }
+
+               WARN_ONCE(!irqs_disabled(),
+                       "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+                       dev->name, ops->ndo_start_xmit);
+
                local_irq_restore(flags);
        }
 
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index caa0278d30a9dbedb6ca0dc172144c7992e2e485..45f9a2a42d564391134797ac5df3e72c59a60621 100644 (file)
@@ -306,8 +306,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                    v4addr != htonl(INADDR_ANY) &&
                    chk_addr_ret != RTN_LOCAL &&
                    chk_addr_ret != RTN_MULTICAST &&
-                   chk_addr_ret != RTN_BROADCAST)
+                   chk_addr_ret != RTN_BROADCAST) {
+                       err = -EADDRNOTAVAIL;
                        goto out;
+               }
        } else {
                if (addr_type != IPV6_ADDR_ANY) {
                        struct net_device *dev = NULL;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 9208cf5f2bd554fb4d66fba51c327bcc3d7a5aae..c45eee1c0e8d46719df66883caa4d874f806ec5f 100644 (file)
@@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
        struct llc_sock *llc = llc_sk(sk);
        int rc = 0;
 
+       memset(&sllc, 0, sizeof(sllc));
        lock_sock(sk);
        if (sock_flag(sk, SOCK_ZAPPED))
                goto out;
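
The llc fix above closes an information leak: the sockaddr is a local stack variable, so any bytes not explicitly written (padding, unused fields) would otherwise be copied to userspace as-is, which is why the whole structure is zeroed first. A generic sketch of the rule, using a made-up sockaddr_foo rather than sockaddr_llc:

    #include <linux/socket.h>
    #include <linux/string.h>

    struct sockaddr_foo {
            unsigned short  sfoo_family;
            unsigned char   sfoo_addr[6];
            /* implicit padding may follow */
    };

    static int foo_getname(struct sockaddr *uaddr, int *uaddrlen)
    {
            struct sockaddr_foo sfoo;

            memset(&sfoo, 0, sizeof(sfoo)); /* clear padding and unset fields */
            sfoo.sfoo_family = AF_UNSPEC;   /* placeholder; fill in real data */
            /* ... copy the real address into sfoo ... */

            memcpy(uaddr, &sfoo, sizeof(sfoo));
            *uaddrlen = sizeof(sfoo);
            return 0;
    }
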
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ce267565e18076ec4a852e52aa54bcdc418c0bbf..659a42d529e3cbcd20100452e018b5584fa970a9 100644 (file)
@@ -67,6 +67,8 @@ static DECLARE_WORK(todo_work, key_todo);
  *
  * @key: key to add to do item for
  * @flag: todo flag(s)
+ *
+ * Must be called with IRQs or softirqs disabled.
  */
 static void add_todo(struct ieee80211_key *key, u32 flag)
 {
@@ -140,9 +142,9 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
        ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf);
 
        if (!ret) {
-               spin_lock(&todo_lock);
+               spin_lock_bh(&todo_lock);
                key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
-               spin_unlock(&todo_lock);
+               spin_unlock_bh(&todo_lock);
        }
 
        if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
@@ -164,12 +166,12 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
        if (!key || !key->local->ops->set_key)
                return;
 
-       spin_lock(&todo_lock);
+       spin_lock_bh(&todo_lock);
        if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
-               spin_unlock(&todo_lock);
+               spin_unlock_bh(&todo_lock);
                return;
        }
-       spin_unlock(&todo_lock);
+       spin_unlock_bh(&todo_lock);
 
        sta = get_sta_for_key(key);
        sdata = key->sdata;
@@ -188,9 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
                       wiphy_name(key->local->hw.wiphy),
                       key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
 
-       spin_lock(&todo_lock);
+       spin_lock_bh(&todo_lock);
        key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-       spin_unlock(&todo_lock);
+       spin_unlock_bh(&todo_lock);
 }
 
 static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -437,14 +439,14 @@ void ieee80211_key_link(struct ieee80211_key *key,
 
        __ieee80211_key_replace(sdata, sta, old_key, key);
 
-       spin_unlock_irqrestore(&sdata->local->key_lock, flags);
-
        /* free old key later */
        add_todo(old_key, KEY_FLAG_TODO_DELETE);
 
        add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
        if (netif_running(sdata->dev))
                add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
+
+       spin_unlock_irqrestore(&sdata->local->key_lock, flags);
 }
 
 static void __ieee80211_key_free(struct ieee80211_key *key)
@@ -547,7 +549,7 @@ static void __ieee80211_key_todo(void)
         */
        synchronize_rcu();
 
-       spin_lock(&todo_lock);
+       spin_lock_bh(&todo_lock);
        while (!list_empty(&todo_list)) {
                key = list_first_entry(&todo_list, struct ieee80211_key, todo);
                list_del_init(&key->todo);
@@ -558,7 +560,7 @@ static void __ieee80211_key_todo(void)
                                          KEY_FLAG_TODO_HWACCEL_REMOVE |
                                          KEY_FLAG_TODO_DELETE);
                key->flags &= ~todoflags;
-               spin_unlock(&todo_lock);
+               spin_unlock_bh(&todo_lock);
 
                work_done = false;
 
@@ -591,9 +593,9 @@ static void __ieee80211_key_todo(void)
 
                WARN_ON(!work_done);
 
-               spin_lock(&todo_lock);
+               spin_lock_bh(&todo_lock);
        }
-       spin_unlock(&todo_lock);
+       spin_unlock_bh(&todo_lock);
 }
 
 void ieee80211_key_todo(void)
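
The mac80211 change above switches todo_lock users to the _bh lock variants. The general rule being applied: when a spinlock can also be taken from softirq context, process-context users must disable bottom halves while holding it, otherwise a softirq arriving on the same CPU could spin on a lock that CPU already holds. A generic sketch of that rule (not the mac80211 code itself):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(foo_lock);
    static u32 foo_flags;

    /* Process context: block softirqs for as long as we hold the lock. */
    static void foo_set_flag(u32 flag)
    {
            spin_lock_bh(&foo_lock);
            foo_flags |= flag;
            spin_unlock_bh(&foo_lock);
    }

    /*
     * Softirq context: a plain spin_lock() is sufficient here, since
     * softirqs do not preempt each other on the same CPU.
     */
    static void foo_softirq_handler(void)
    {
            spin_lock(&foo_lock);
            /* ... consume foo_flags ... */
            spin_unlock(&foo_lock);
    }
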
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 98fc190e8f0eed8683e9836b6be1567ecea74382..390b7d09fe512f2957769ace3b8a579b64f64086 100644 (file)
@@ -52,7 +52,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
 
        q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
        if (q->master == NULL)
-               return -ENOMEM;
+               return false;
 
        q->master->quota = q->quota;
        return true;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 24d17ce9c294f384f1a638b7888540455479f3aa..e1c2bf7e9ba44ea70399d719a89b183141f66915 100644 (file)
@@ -468,8 +468,8 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 {
-       hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-       wd->timer.function = qdisc_watchdog;
+       tasklet_hrtimer_init(&wd->timer, qdisc_watchdog,
+                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        wd->qdisc = qdisc;
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
@@ -485,13 +485,13 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
        wd->qdisc->flags |= TCQ_F_THROTTLED;
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
-       hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
+       tasklet_hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL(qdisc_watchdog_schedule);
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
-       hrtimer_cancel(&wd->timer);
+       tasklet_hrtimer_cancel(&wd->timer);
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
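
For reference, the recently added tasklet_hrtimer helper wraps an hrtimer so that the user callback runs from a tasklet (softirq context) rather than directly in hard-IRQ context, letting timer-driven qdisc work execute in the same context as the rest of the packet scheduler. A small sketch of the API used by the watchdog conversion above and the CBQ conversion below; the foo_* names are invented:

    #include <linux/interrupt.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct tasklet_hrtimer foo_timer;

    /* Runs in tasklet (softirq) context, not hard-IRQ context. */
    static enum hrtimer_restart foo_timer_fn(struct hrtimer *timer)
    {
            /* ... kick the queue, take BH-safe locks, etc. ... */
            return HRTIMER_NORESTART;
    }

    static void foo_timer_setup(void)
    {
            tasklet_hrtimer_init(&foo_timer, foo_timer_fn,
                                 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    }

    static void foo_timer_arm(ktime_t expires)
    {
            tasklet_hrtimer_start(&foo_timer, expires, HRTIMER_MODE_ABS);
    }

    static void foo_timer_stop(void)
    {
            tasklet_hrtimer_cancel(&foo_timer);
    }
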
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d5798e17a83205f3a44ac09d3c4b5318801d24d4..81652d6ccd36aa5038a92f14106a65ff65e1a27e 100644 (file)
@@ -163,7 +163,7 @@ struct cbq_sched_data
        psched_time_t           now_rt;         /* Cached real time */
        unsigned                pmask;
 
-       struct hrtimer          delay_timer;
+       struct tasklet_hrtimer  delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
@@ -503,6 +503,8 @@ static void cbq_ovl_delay(struct cbq_class *cl)
                cl->undertime = q->now + delay;
 
                if (delay > 0) {
+                       struct hrtimer *ht;
+
                        sched += delay + cl->penalty;
                        cl->penalized = sched;
                        cl->cpriority = TC_CBQ_MAXPRIO;
@@ -510,12 +512,12 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 
                        expires = ktime_set(0, 0);
                        expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
-                       if (hrtimer_try_to_cancel(&q->delay_timer) &&
-                           ktime_to_ns(ktime_sub(
-                                       hrtimer_get_expires(&q->delay_timer),
-                                       expires)) > 0)
-                               hrtimer_set_expires(&q->delay_timer, expires);
-                       hrtimer_restart(&q->delay_timer);
+                       ht = &q->delay_timer.timer;
+                       if (hrtimer_try_to_cancel(ht) &&
+                           ktime_to_ns(ktime_sub(hrtimer_get_expires(ht),
+                                                 expires)) > 0)
+                               hrtimer_set_expires(ht, expires);
+                       hrtimer_restart(ht);
                        cl->delayed = 1;
                        cl->xstats.overactions++;
                        return;
@@ -621,7 +623,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 
                time = ktime_set(0, 0);
                time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
-               hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
+               tasklet_hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
        }
 
        sch->flags &= ~TCQ_F_THROTTLED;
@@ -1214,7 +1216,7 @@ cbq_reset(struct Qdisc* sch)
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
        qdisc_watchdog_cancel(&q->watchdog);
-       hrtimer_cancel(&q->delay_timer);
+       tasklet_hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
        q->now_rt = q->now;
@@ -1397,7 +1399,8 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->link.minidle = -0x7FFFFFFF;
 
        qdisc_watchdog_init(&q->watchdog, sch);
-       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       tasklet_hrtimer_init(&q->delay_timer, cbq_undelay,
+                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();