Merge branch 'autofs-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/deller...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 13 Feb 2013 00:20:07 +0000 (16:20 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 13 Feb 2013 00:20:07 +0000 (16:20 -0800)
Pull hp parisc automounter fix from Helge Deller:
 "This unbreaks automounter support for the parisc architecture (and
  probably aarch64 as well)."

* 'autofs-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  unbreak automounter support on 64-bit kernel with 32-bit userspace (v2)
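
The bug this fixes, in brief: the autofs daemon reads request packets from a kernel pipe, and the v5 packet embeds an autofs_wqt_t ('unsigned long') wait-queue token, so a 64-bit kernel and a 32-bit daemon disagree about the packet size and the daemon misframes every request. x86-64 papered over this with a compat-task special case; this fix instead records at mount time whether the daemon is a 32-bit task, so other 64-bit architectures work too. A minimal sketch of the size mismatch (illustrative struct only -- the real autofs_v5_packet in include/uapi/linux/auto_fs4.h has more fields):

    #include <stdio.h>

    struct v5_packet_like {
            int proto_version;              /* packet header */
            int type;
            unsigned long wait_queue_token; /* 4 bytes on ILP32, 8 on LP64 */
            char name[256];
    };

    int main(void)
    {
            /* Prints 268 when built -m32 but 272 when built -m64, so a
             * 32-bit daemon under a 64-bit kernel reads short packets. */
            printf("%zu\n", sizeof(struct v5_packet_like));
            return 0;
    }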

110 files changed:
Makefile
arch/arm/common/gic.c
arch/arm/include/asm/memory.h
arch/arm/include/asm/smp_scu.h
arch/arm/kernel/smp_scu.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-highbank/sysregs.h
arch/arm/mach-realview/include/mach/irqs-eb.h
arch/arm/mm/dma-mapping.c
arch/m68k/include/asm/processor.h
arch/s390/kernel/time.c
arch/tile/Kconfig
arch/tile/include/asm/io.h
arch/tile/include/asm/irqflags.h
arch/tile/include/uapi/arch/interrupts_32.h
arch/tile/include/uapi/arch/interrupts_64.h
arch/tile/kernel/intvec_64.S
arch/tile/kernel/process.c
arch/tile/kernel/reboot.c
arch/tile/kernel/setup.c
arch/tile/kernel/stack.c
arch/tile/lib/cacheflush.c
arch/tile/lib/cpumask.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
drivers/atm/iphase.h
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon_nflash.c
drivers/bcma/driver_gpio.c
drivers/bcma/main.c
drivers/gpu/drm/nouveau/core/core/falcon.c
drivers/gpu/drm/nouveau/core/core/subdev.c
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/subdev/fb/base.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/input/joystick/analog.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/c_can/c_can.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/rtc/rtc-pl031.c
drivers/ssb/driver_gpio.c
drivers/ssb/main.c
drivers/ssb/ssb_private.h
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
include/linux/usb/usbnet.h
include/net/transp_v6.h
kernel/pid.c
mm/memcontrol.c
mm/mlock.c
mm/page_alloc.c
net/bluetooth/hci_conn.c
net/bluetooth/smp.c
net/core/pktgen.c
net/core/skbuff.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ipv6_sockglue.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/openvswitch/vport-netdev.c
net/packet/af_packet.c
net/sched/sch_netem.c
net/sctp/auth.c
net/sctp/endpointola.c
net/sctp/socket.c
net/sunrpc/svcsock.c
net/wireless/scan.c

index 54dfde5e9f9e10dbada7845c0d1d4f974b46d4ab..08ef9bdb80c701c63234156affc2f152f0575b78 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index 36ae03a3f5d1a07527145ff545ded90888ba0a7b..87dfa9026c5bdcb1ba9d57d718d00da671c8fe21 100644 (file)
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+       void __iomem *base = gic_data_dist_base(gic);
+       u32 mask, i;
+
+       for (i = mask = 0; i < 32; i += 4) {
+               mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+               mask |= mask >> 16;
+               mask |= mask >> 8;
+               if (mask)
+                       break;
+       }
+
+       if (!mask)
+               pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+       return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
        unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
        /*
         * Set all global interrupts to this CPU only.
         */
-       cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+       cpumask = gic_get_cpumask(gic);
+       cpumask |= cpumask << 8;
+       cpumask |= cpumask << 16;
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
         * Get what the GIC says our CPU mask is.
         */
        BUG_ON(cpu >= NR_GIC_CPU_IF);
-       cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+       cpu_mask = gic_get_cpumask(gic);
        gic_cpu_map[cpu] = cpu_mask;
 
        /*
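
Context for the two gic.c hunks above: GIC_DIST_TARGET is a byte-per-interrupt array of CPU-target registers, read 32 bits (four interrupts) at a time. For the banked interrupts 0-31 every implemented byte reads back as the current CPU interface's mask, but reading only offset 0 fails on GICs whose first interrupts are unimplemented and read back as zero -- hence the scan. The shift/OR fold collapses the four byte lanes into the low byte; assuming the CPU is wired to interface 1 and the register reads 0x02020202:

    mask = 0x02020202;
    mask |= mask >> 16;     /* 0x02020202 | 0x00000202 -> still 0x02020202 */
    mask |= mask >> 8;      /* 0x02020202 | 0x00020202 -> still 0x02020202 */
    return mask;            /* truncated to u8: 0x02, interface 1's bit */

gic_dist_init() then replicates that byte back into all four lanes (cpumask |= cpumask << 8, then << 16) so a single 32-bit write sets the target for four SPIs at once.
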
index 73cf03aa981e1b665d9a40f4f80847335ca49441..1c4df27f93322604398d70f9c585332e97374b10 100644 (file)
@@ -37,7 +37,7 @@
  */
 #define PAGE_OFFSET            UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE     (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE     ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
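
Worked example, assuming the default 3G/1G split (CONFIG_PAGE_OFFSET = 0xC0000000): TASK_SIZE = 0xC0000000 - 0x01000000 = 0xBF000000, TASK_SIZE / 3 = 0x3FAAAAAA, and ALIGN(0x3FAAAAAA, SZ_16M) = 0x40000000. The old PAGE_OFFSET / 3 happened to land on an aligned 0x40000000 for this split, but a 2G/2G split gives 0x80000000 / 3 = 0x2AAAAAAA -- an mmap base that is not even page-aligned -- whereas the new form always yields a 16 MB-aligned base.
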
index 4eb6d005ffaa4b625c9f86fc8532b046d0cf1f6a..86dff32a073755f46c40c379d27a29f9aab5b829 100644 (file)
@@ -7,8 +7,14 @@
 
 #ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
 #endif
 
 #endif
index b9f015e843d8d10c461197c4399663942fb721e9..45eac87ed66a692859e7431af684f0b33b3ffeed 100644 (file)
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
        unsigned int val;
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
        if (mode > 3 || mode == 1 || cpu > 3)
                return -EINVAL;
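
On ARM, cpu_logical_map() returns the CPU's full MPIDR value, in which affinity level 1 (bits [15:8]) carries the cluster number on multi-cluster SoCs; MPIDR_AFFINITY_LEVEL(mpidr, 0) keeps only bits [7:0], the CPU's index within its cluster, which is what the per-cluster SCU expects. Assuming CPU 0 of cluster 1 (MPIDR 0x100): the old code computed cpu = 0x100 and bailed out on the cpu > 3 check; the new code correctly yields 0. The same conversion is applied to Highbank's jump-table and power-gating helpers below.
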
index 981dc1e1da518b1ddf5f6340085837ad07d7e444..e6c06128293962b24adbe84892e19d4a876ff373 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-       cpu = cpu_logical_map(cpu);
+       cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
        writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
index 70af9d13fcefefb1fad252c1a8c8a2bffa71ef3b..5995df7f2622eaa20382d7ad0e90483675a6b5eb 100644 (file)
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
 
 static inline void highbank_set_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
        else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
 
 static inline void highbank_clear_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
        else
index d6b5073692d2fdda8db2656a032f374894e57463..44754230fdcc3e23098f1cfcacee1530af508da4 100644 (file)
 /*
  * Only define NR_IRQS if less than NR_IRQS_EB
  */
-#define NR_IRQS_EB             (IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB             (IRQ_EB_GIC_START + 128)
 
 #if defined(CONFIG_MACH_REALVIEW_EB) \
        && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
index 076c26d43864f8a5b3da49ac7231a62e8edde0c2..dda3904dc64c1cb3f7681cf20998397c1e103f76 100644 (file)
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        if (is_coherent || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (gfp & GFP_ATOMIC)
+       else if (!(gfp & __GFP_WAIT))
                addr = __alloc_from_pool(size, &page);
        else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
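
The old test was subtly wrong: in this era GFP_ATOMIC is defined as __GFP_HIGH, a "may use emergency reserves" flag, so gfp & GFP_ATOMIC checked allocation priority rather than atomicity. Whether the allocation may sleep is determined by __GFP_WAIT, so !(gfp & __GFP_WAIT) now also routes masks like GFP_NOWAIT (which carry neither bit) to the non-blocking atomic pool instead of into __alloc_remap_buffer(), which can sleep.
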
index ae700f49e51da86b8b86b8ce9ee41b0a1908d6b1..b0768a657920f63c51522f2ddb0ef22da0c8dbc3 100644 (file)
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
 #define start_thread(_regs, _pc, _usp)                  \
 do {                                                    \
        (_regs)->pc = (_pc);                            \
-       ((struct switch_stack *)(_regs))[-1].a6 = 0;    \
        setframeformat(_regs);                          \
        if (current->mm)                                \
                (_regs)->d5 = current->mm->start_data;  \
index a5f4f5a1d24b951ac8e7e10cc9af81e153208c63..0aa98db8a80dff08f4ba392139e80eec173bfd62 100644 (file)
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
        nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
        do_div(nsecs, 125);
        S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+       /* Program the maximum value if we have an overflow (== year 2042) */
+       if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+               S390_lowcore.clock_comparator = -1ULL;
        set_clock_comparator(S390_lowcore.clock_comparator);
        return 0;
 }
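
The arithmetic behind the hunk: the TOD clock's least-significant bit is 2^-12 microseconds, so nanoseconds convert to TOD units as ns * 4.096 = (ns / 125) << 9, since 4.096 = 512/125. The 64-bit TOD value, counted from the 1900-01-01 epoch, wraps after 2^52 microseconds (roughly 142.7 years), i.e. in September 2042; once a requested expiry crosses that point, adding it to sched_clock_base_cc overflows to a value in the past, and the new check clamps the comparator to -1ULL (the maximum) instead.
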
index 875d008828b8ec619c295cf1b2d5c8923ea0389b..1bb7ad4aeff4754937bdc09f667110d28348fc96 100644 (file)
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Tilera-specific configuration"
 
 config NR_CPUS
index 2a9b293fece6c00f2c71d8c5829340bb842970e5..31672918064cf781d9cf0197d812237125c4d8d0 100644 (file)
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
 {
        int x;
        BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                writel(*(u32 *)(src + x), dst + x);
 }
 
+#endif
+
 /*
  * The Tile architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
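
The new guard appears to mirror the condition under which tile defines readl()/writel() in the first place: without MMIO support or CONFIG_PCI there are no low-level accessors for memset_io()/memcpy_toio()/memcpy_fromio() to build on, so defining them unconditionally broke such configurations. memset_io() also gains the conventional volatile qualifier on its destination pointer.
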
index b4e96fef2cf8edc0b4931af8edcce502c7bebd4f..241c0bb60b12e5cf4e9eedce50279d648d313e25 100644 (file)
 #include <arch/interrupts.h>
 #include <arch/chip.h>
 
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
 #define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+       (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
 
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+       (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
 #endif
 
 #ifndef __ASSEMBLY__
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1)                                        \
        {                                                       \
-        movei  tmp0, -1;                                       \
+        movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO;             \
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)        \
        };                                                      \
        {                                                       \
index 96b5710505b6897b98e9eeb9c6f9317173fd2b23..2efe3f68b2d6a80059acd078d53acea552cf4631 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 /* Note: must handle breaking interrupts into high and low words manually. */
 #define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
 #ifndef __ASSEMBLER__
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
 
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define NONQUEUED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define CRITICAL_MASKED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define MASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define UNMASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define SYNC_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define NON_SYNC_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
index 5bb58b2e4e6f84370d04b6d233c212ddc37ddfad..13c9f91823484bf94b28703c3a3cede1e891c344 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 #ifdef __ASSEMBLER__
 /* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
 #else
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
 
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define NONQUEUED_INTERRUPTS ( \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
     0)
 #define CRITICAL_MASKED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
     0)
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define MASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
     0)
 #define UNMASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define SYNC_INTERRUPTS ( \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
     0)
 #define NON_SYNC_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
index 54bc9a6678e8d6b03c319d56b7b5b234f502fb6e..4ea08090265426df8f5f61dbcfaf6a0c92904694 100644 (file)
@@ -1035,7 +1035,9 @@ handle_syscall:
        /* Ensure that the syscall number is within the legal range. */
        {
         moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
         blbs   r30, .Lcompat_syscall
+#endif
        }
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }
 
+#ifdef CONFIG_COMPAT
 .Lcompat_syscall:
        /*
         * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
        { move r15, r4; addxi r4, r4, 0 }
        { move r16, r5; addxi r5, r5, 0 }
        j .Lload_syscall_pointer
+#endif
 
 .Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
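
Without the added #ifdef pair, a CONFIG_COMPAT=n build would still assemble the branch to .Lcompat_syscall while the label itself (and the compat syscall table it loads) is compiled out, leaving an undefined symbol; guarding the branch and the handler together keeps both in sync with the config.
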
index 0e5661e7d00d36e770ccebfe752e891fa24e1e6f..caf93ae117930d407bf6a335039817cfdac67408 100644 (file)
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
 int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
 {
-       struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+       struct pt_regs *childregs = task_pt_regs(p);
        unsigned long ksp;
        unsigned long *callee_regs;
 
index baa3d905fee21c9fc2ef403449f7e4d671ad7e1e..d1b5c913ae724d4a4f794d0590e263d305a4a58a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/reboot.h>
 #include <linux/smp.h>
 #include <linux/pm.h>
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
 
 /* No interesting distinction to be made here. */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
index 6a649a4462d35ee7b70f0b2305e58bd90028ed87..d1e15f7b59c68ab54ee62a137443270287f59f34 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
 #include <linux/start_kernel.h>
+#include <linux/screen_info.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 /* Chip information */
 char chip_model[64] __write_once;
 
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
index b2f44c28dda6f29c1867c59464462c63ac427e6a..ed258b8ae320229f401e7f22a9515ab4b6cda954 100644 (file)
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
                       p->pc, p->sp, p->ex1);
                p = NULL;
        }
-       if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+       if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
 }
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
        save_stack_trace_tsk(NULL, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 #endif
 
index db4fb89e12d89a4461b8cc19c99885469fcaf22c..8f8ad814b1398619314b5a26d62695279d4f63f1 100644 (file)
@@ -12,6 +12,7 @@
  *   more details.
  */
 
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
        __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
 #endif
 }
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
index fdc403614d12b1d06a271942b1f846ef3871ac67..75947edccb26625fa8e68fb43fdff54cbd4900f0 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/export.h>
 
 /*
  * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
        } while (*bp != '\0' && *bp != '\n');
        return 0;
 }
+EXPORT_SYMBOL(bitmap_parselist_crop);
index dd5f0a33fdaff95d6b53efb947fa86880d92d706..4385cb6fa00ade132dc1ba9fb3fda09fbc775151 100644 (file)
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_sysconf);
 EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
 
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
index 5f7868dcd6d482abc42e00b6349f5234d0c63c3b..1ae911939a18bdd6050e9a928b099471efeb7599 100644 (file)
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
                __set_pte(ptep, pte_set_home(pteval, home));
        }
 }
+EXPORT_SYMBOL(homecache_change_page_home);
 
 struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
index 6a0955e6d4fc92231e59d36e0849dd46c9e0e60b..53ecac5a2161d729fc957b8b4379a526ca89e0d3 100644 (file)
@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE  
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE  
 
-typedef volatile u_int  freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int   rreg_t;
 
 typedef struct _ffredn_t {
-        freg_t  idlehead_high;  /* Idle cell header (high)              */
-        freg_t  idlehead_low;   /* Idle cell header (low)               */
-        freg_t  maxrate;        /* Maximum rate                         */
-        freg_t  stparms;        /* Traffic Management Parameters        */
-        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
-        freg_t  rm_type;        /*                                      */
-        u_int   filler5[0x17 - 0x06];
-        freg_t  cmd_reg;        /* Command register                     */
-        u_int   filler18[0x20 - 0x18];
-        freg_t  cbr_base;       /* CBR Pointer Base                     */
-        freg_t  vbr_base;       /* VBR Pointer Base                     */
-        freg_t  abr_base;       /* ABR Pointer Base                     */
-        freg_t  ubr_base;       /* UBR Pointer Base                     */
-        u_int   filler24;
-        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */
-        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */
-        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */
-        freg_t  vct_base;       /* Main VC Table Base                   */
-        freg_t  vcte_base;      /* Extended Main VC Table Base          */
-        u_int   filler2a[0x2C - 0x2A];
-        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */
-        freg_t  cbr_tab_end;    /* CBR Table End                        */
-        freg_t  cbr_pointer;    /* CBR Pointer                          */
-        u_int   filler2f[0x30 - 0x2F];
-        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */
-        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */
-        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */
-        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */
-        freg_t  tcq_st_adr;     /* Transmit Complete Queue Start Address*/
-        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */
-        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
-        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
-        u_int   filler38[0x40 - 0x38];
-        freg_t  queue_base;     /* Base address for PRQ and TCQ         */
-        freg_t  desc_base;      /* Base address of descriptor table     */
-        u_int   filler42[0x45 - 0x42];
-        freg_t  mode_reg_0;     /* Mode register 0                      */
-        freg_t  mode_reg_1;     /* Mode register 1                      */
-        freg_t  intr_status_reg;/* Interrupt Status register            */
-        freg_t  mask_reg;       /* Mask Register                        */
-        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */
-        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */
-        freg_t  state_reg;      /* Status register                      */
-        u_int   filler4c[0x58 - 0x4c];
-        freg_t  curr_desc_num;  /* Contains the current descriptor num  */
-        freg_t  next_desc;      /* Next descriptor                      */
-        freg_t  next_vc;        /* Next VC                              */
-        u_int   filler5b[0x5d - 0x5b];
-        freg_t  present_slot_cnt;/* Present slot count                  */
-        u_int   filler5e[0x6a - 0x5e];
-        freg_t  new_desc_num;   /* New descriptor number                */
-        freg_t  new_vc;         /* New VC                               */
-        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */
-        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */
-        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */
-        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */
-        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */
-        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */
-        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */
-        freg_t  cbr_vc;         /* CBR VC                               */
-        freg_t  vbr_sb_vc;      /* VBR SB VC                            */
-        freg_t  abr_sb_vc;      /* ABR SB VC                            */
-        freg_t  ubr_sb_vc;      /* UBR SB VC                            */
-        freg_t  vbr_next_link;  /* VBR next link                        */
-        freg_t  abr_next_link;  /* ABR next link                        */
-        freg_t  ubr_next_link;  /* UBR next link                        */
-        u_int   filler7a[0x7c-0x7a];
-        freg_t  out_rate_head;  /* Out of rate head                     */
-        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
-        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */
-        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
-        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
+       ffreg_t idlehead_high;  /* Idle cell header (high)              */
+       ffreg_t idlehead_low;   /* Idle cell header (low)               */
+       ffreg_t maxrate;        /* Maximum rate                         */
+       ffreg_t stparms;        /* Traffic Management Parameters        */
+       ffreg_t abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
+       ffreg_t rm_type;        /*                                      */
+       u_int   filler5[0x17 - 0x06];
+       ffreg_t cmd_reg;        /* Command register                     */
+       u_int   filler18[0x20 - 0x18];
+       ffreg_t cbr_base;       /* CBR Pointer Base                     */
+       ffreg_t vbr_base;       /* VBR Pointer Base                     */
+       ffreg_t abr_base;       /* ABR Pointer Base                     */
+       ffreg_t ubr_base;       /* UBR Pointer Base                     */
+       u_int   filler24;
+       ffreg_t vbrwq_base;     /* VBR Wait Queue Base                  */
+       ffreg_t abrwq_base;     /* ABR Wait Queue Base                  */
+       ffreg_t ubrwq_base;     /* UBR Wait Queue Base                  */
+       ffreg_t vct_base;       /* Main VC Table Base                   */
+       ffreg_t vcte_base;      /* Extended Main VC Table Base          */
+       u_int   filler2a[0x2C - 0x2A];
+       ffreg_t cbr_tab_beg;    /* CBR Table Begin                      */
+       ffreg_t cbr_tab_end;    /* CBR Table End                        */
+       ffreg_t cbr_pointer;    /* CBR Pointer                          */
+       u_int   filler2f[0x30 - 0x2F];
+       ffreg_t prq_st_adr;     /* Packet Ready Queue Start Address     */
+       ffreg_t prq_ed_adr;     /* Packet Ready Queue End Address       */
+       ffreg_t prq_rd_ptr;     /* Packet Ready Queue read pointer      */
+       ffreg_t prq_wr_ptr;     /* Packet Ready Queue write pointer     */
+       ffreg_t tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+       ffreg_t tcq_ed_adr;     /* Transmit Complete Queue End Address  */
+       ffreg_t tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+       ffreg_t tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
+       u_int   filler38[0x40 - 0x38];
+       ffreg_t queue_base;     /* Base address for PRQ and TCQ         */
+       ffreg_t desc_base;      /* Base address of descriptor table     */
+       u_int   filler42[0x45 - 0x42];
+       ffreg_t mode_reg_0;     /* Mode register 0                      */
+       ffreg_t mode_reg_1;     /* Mode register 1                      */
+       ffreg_t intr_status_reg;/* Interrupt Status register            */
+       ffreg_t mask_reg;       /* Mask Register                        */
+       ffreg_t cell_ctr_high1; /* Total cell transfer count (high)     */
+       ffreg_t cell_ctr_lo1;   /* Total cell transfer count (low)      */
+       ffreg_t state_reg;      /* Status register                      */
+       u_int   filler4c[0x58 - 0x4c];
+       ffreg_t curr_desc_num;  /* Contains the current descriptor num  */
+       ffreg_t next_desc;      /* Next descriptor                      */
+       ffreg_t next_vc;        /* Next VC                              */
+       u_int   filler5b[0x5d - 0x5b];
+       ffreg_t present_slot_cnt;/* Present slot count                  */
+       u_int   filler5e[0x6a - 0x5e];
+       ffreg_t new_desc_num;   /* New descriptor number                */
+       ffreg_t new_vc;         /* New VC                               */
+       ffreg_t sched_tbl_ptr;  /* Schedule table pointer               */
+       ffreg_t vbrwq_wptr;     /* VBR wait queue write pointer         */
+       ffreg_t vbrwq_rptr;     /* VBR wait queue read pointer          */
+       ffreg_t abrwq_wptr;     /* ABR wait queue write pointer         */
+       ffreg_t abrwq_rptr;     /* ABR wait queue read pointer          */
+       ffreg_t ubrwq_wptr;     /* UBR wait queue write pointer         */
+       ffreg_t ubrwq_rptr;     /* UBR wait queue read pointer          */
+       ffreg_t cbr_vc;         /* CBR VC                               */
+       ffreg_t vbr_sb_vc;      /* VBR SB VC                            */
+       ffreg_t abr_sb_vc;      /* ABR SB VC                            */
+       ffreg_t ubr_sb_vc;      /* UBR SB VC                            */
+       ffreg_t vbr_next_link;  /* VBR next link                        */
+       ffreg_t abr_next_link;  /* ABR next link                        */
+       ffreg_t ubr_next_link;  /* UBR next link                        */
+       u_int   filler7a[0x7c-0x7a];
+       ffreg_t out_rate_head;  /* Out of rate head                     */
+       u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
+       ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high)   */
+       ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
+       u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
 } ffredn_t;
 
 typedef struct _rfredn_t {
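
The wholesale freg_t -> ffreg_t rename above is a namespace fix: s390 also exposes a type named freg_t through its uapi ptrace.h, and the clashing typedefs broke builds that pull in both headers. Only the driver-local name changes; the register layout and all offsets stay identical (the hunk also converts the struct's indentation from spaces to tabs).
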
index 19e3fbfd5757368790980dfec756135e981b0eaf..cb0c4548857282c3bc6b38cfa836ad9474370604 100644 (file)
@@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
 #else
 static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
        return -ENOTSUPP;
 }
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+       return 0;
+}
 #endif /* CONFIG_BCMA_DRIVER_GPIO */
 
 #endif
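bcma_private.h follows the usual kernel pattern for config-gated helpers: real prototypes when CONFIG_BCMA_DRIVER_GPIO is set, static inline stubs otherwise, so callers such as bcma_bus_unregister() need no #ifdefs of their own. A sketch of the same pattern with hypothetical names:

        #include <linux/errno.h>

        struct foo_dev;

        #ifdef CONFIG_FOO
        int foo_register(struct foo_dev *dev);
        int foo_unregister(struct foo_dev *dev);
        #else
        /* Compiled-out stubs: registration reports "not supported",
         * unregistration is a successful no-op. */
        static inline int foo_register(struct foo_dev *dev)
        {
                return -ENOTSUPP;
        }
        static inline int foo_unregister(struct foo_dev *dev)
        {
                return 0;
        }
        #endif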
index dbda91e4dff5189ccabbb710b7a2a11f0e099651..1f0b83e18f6827f5003669b4015c2a89b2d369c5 100644 (file)
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
        struct bcma_bus *bus = cc->core->bus;
 
        if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-           cc->core->id.rev != 0x38) {
+           cc->core->id.rev != 38) {
                bcma_err(bus, "NAND flash on unsupported board!\n");
                return -ENOTSUPP;
        }
index 9a6f585da2d9f21d232ac4b58cc3a36b442d9b36..71f755c06fc6637497665f31d15f6432d49c3075 100644 (file)
@@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
        return gpiochip_add(chip);
 }
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+       return gpiochip_remove(&cc->gpio);
+}
index 4a92f647b58bdef4fa28f28b772d960036469c78..324f9debda88007ac096320dae35340db99e3fe5 100644 (file)
@@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
        struct bcma_device *cores[3];
+       int err;
+
+       err = bcma_gpio_unregister(&bus->drv_cc);
+       if (err == -EBUSY)
+               bcma_err(bus, "Some GPIOs are still in use.\n");
+       else if (err)
+               bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
 
        cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
index 6b0843c338775d6455322a099f568a4390d8db64..e05c15777588616f69b753e915c7c8bcb59a922f 100644 (file)
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
        nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
 
        /* wait for 'uc halted' to be signalled before continuing */
-       if (falcon->secret) {
-               nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+       if (falcon->secret && falcon->version < 4) {
+               if (!falcon->version)
+                       nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+               else
+                       nv_wait(falcon, 0x180, 0x80000000, 0);
                nv_wo32(falcon, 0x004, 0x00000010);
        }
 
index f74c30aa33a0e334eda733ae1a0ea5b9a3d10377..48f06378d3f9d5f9e09e03291a11d6e26527f674 100644 (file)
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       mutex_init(&subdev->mutex);
+       __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
        subdev->name = subname;
 
        if (parent) {
index 5982935ee23a96896fb12d98e2dc5f723ab245e0..106bb19fdd9a65ee12b0c92241ea5045a4b692ac 100644 (file)
@@ -50,10 +50,13 @@ int  nouveau_object_fini(struct nouveau_object *, bool suspend);
 
 extern struct nouveau_ofuncs nouveau_object_ofuncs;
 
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
 struct nouveau_oclass {
        u32 handle;
-       struct nouveau_ofuncs *ofuncs;
-       struct nouveau_omthds *omthds;
+       struct nouveau_ofuncs * const ofuncs;
+       struct nouveau_omthds * const omthds;
+       struct lock_class_key lock_class_key;
 };
 
 #define nv_oclass(o)    nv_object(o)->oclass
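Lockdep identifies a lock's class by the address of its lock_class_key, and those keys must live in static storage, which is why nouveau_oclass now embeds one instead of relying on a dynamically allocated key. A sketch of the underlying idiom, with illustrative names:

        #include <linux/mutex.h>

        /* Statically allocated, as lockdep requires; its address names the class. */
        static struct lock_class_key demo_lock_key;

        static void demo_mutex_setup(struct mutex *m)
        {
                /* Like mutex_init(), but with an explicit name and class key,
                 * so these mutexes are tracked separately from all others. */
                __mutex_init(m, "demo-mutex", &demo_lock_key);
        }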
index d6d16007ec1ac08e0e88ca35e8c4e8d20ae4e2f9..d62045f454b265df1c07ebcc9cd77207482b7dea 100644 (file)
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
                        return ret;
        }
 
-       if (!nouveau_mm_initialised(&pfb->tags) && tags) {
-               ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+       if (!nouveau_mm_initialised(&pfb->tags)) {
+               ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
                if (ret)
                        return ret;
        }
index 487cb8c6c204dda270d1957e869b42beae4fdccd..eac236ed19b251042ea3bfe1ef75d6da6856d188 100644 (file)
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
        struct nouveau_bios *bios = nouveau_bios(device);
        const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
        const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       u32 size;
+       u32 size, tags = 0;
        int ret;
 
        pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
                        return ret;
 
                pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+               tags = nv_rd32(pfb, 0x100320);
                break;
        }
 
-       return nv_rd32(pfb, 0x100320);
+       return tags;
 }
 
 static int
index 69d7b1d0b9d69e2b24f27be89cc7f869669bd4a3..1699a9083a2f86b179d0b4eba8ea1029fd52b97d 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <core/engine.h>
+#include <linux/swiotlb.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
index 8b090f1eb51d7f3ebf284814709ef1f0c472a774..5e7aef23825a0d33f08addfdcb97cc8e663f62b8 100644 (file)
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        return 0;
 }
 
+static struct lock_class_key drm_client_lock_class_key;
+
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
        if (ret)
                return ret;
+       lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
 
        dev->dev_private = drm;
        drm->dev = dev;
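For a mutex that was already initialised elsewhere (here by nouveau_cli_create()), the class can be overridden after the fact with lockdep_set_class(); the key must again be static. An illustrative sketch:

        #include <linux/mutex.h>

        static struct lock_class_key demo_client_key;

        static void demo_reclass(struct mutex *m)
        {
                /* Re-file an already-initialised mutex under its own class. */
                lockdep_set_class(m, &demo_client_key);
        }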
index 7a445666e71f1221ca92cd07bcdea61b29867ded..ee4cff534f100dae84d64bff2122b9b2bc89a9aa 100644 (file)
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                return -EINVAL;
                        }
                        if (tiled) {
-                               dst_offset = ib[idx+1];
+                               dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 7;
                        } else {
-                               dst_offset = ib[idx+1];
-                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+                               dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                /* detile bit */
                                                if (idx_value & (1 << 31)) {
                                                        /* tiled src, linear dst */
-                                                       src_offset = ib[idx+1];
+                                                       src_offset = radeon_get_ib_value(p, idx+1);
                                                        src_offset <<= 8;
                                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                                       dst_offset = ib[idx+7];
-                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       dst_offset = radeon_get_ib_value(p, idx+7);
+                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                                } else {
                                                        /* linear src, tiled dst */
-                                                       src_offset = ib[idx+7];
-                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       src_offset = radeon_get_ib_value(p, idx+7);
+                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                                       dst_offset = ib[idx+1];
+                                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                                        dst_offset <<= 8;
                                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                                }
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                /* detile bit */
                                                if (idx_value & (1 << 31)) {
                                                        /* tiled src, linear dst */
-                                                       src_offset = ib[idx+1];
+                                                       src_offset = radeon_get_ib_value(p, idx+1);
                                                        src_offset <<= 8;
                                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                                       dst_offset = ib[idx+7];
-                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       dst_offset = radeon_get_ib_value(p, idx+7);
+                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                                } else {
                                                        /* linear src, tiled dst */
-                                                       src_offset = ib[idx+7];
-                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       src_offset = radeon_get_ib_value(p, idx+7);
+                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                                       dst_offset = ib[idx+1];
+                                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                                        dst_offset <<= 8;
                                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                                }
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        switch (misc) {
                                        case 0:
                                                /* L2L, byte */
-                                               src_offset = ib[idx+2];
-                                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                               dst_offset = ib[idx+1];
-                                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+2);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
+                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
                                                if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
                                                                 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
-                                               dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                               dst2_offset = ib[idx+2];
-                                               dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-                                               src_offset = ib[idx+3];
-                                               src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
+                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
+                                               dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+3);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        }
                                } else {
                                        /* L2L, dw */
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
                                        if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
                                                         src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
                                return -EINVAL;
                        }
-                       dst_offset = ib[idx+1];
-                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       dst_offset = radeon_get_ib_value(p, idx+1);
+                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
                                         dst_offset, radeon_bo_size(dst_reloc->robj));
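The pattern in this DMA CS parser change is consistent throughout: user-supplied command words are now read through radeon_get_ib_value(), which returns the word from the parser's CPU-side copy of the command chunk rather than reading back through the IB mapping, while relocation patching still writes into ib[] directly. The read/write split, condensed from the hunks above (offsets illustrative):

        u64 dst_offset;

        /* read user-supplied command words through the accessor ... */
        dst_offset  = radeon_get_ib_value(p, idx+1);
        dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

        /* ... but patch relocations by writing into the IB directly */
        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
        ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;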
index 69ec24ab8d636453dfa7e2ab313588786341c977..9b2512bf1a46638f870b5967a45877455fc48c0f 100644 (file)
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                return -EINVAL;
                        }
                        if (tiled) {
-                               dst_offset = ib[idx+1];
+                               dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 5;
                        } else {
-                               dst_offset = ib[idx+1];
-                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+                               dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                /* detile bit */
                                if (idx_value & (1 << 31)) {
                                        /* tiled src, linear dst */
-                                       src_offset = ib[idx+1];
+                                       src_offset = radeon_get_ib_value(p, idx+1);
                                        src_offset <<= 8;
                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                       dst_offset = ib[idx+5];
-                                       dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+5);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                        ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
-                                       src_offset = ib[idx+5];
-                                       src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+5);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                        ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                       dst_offset = ib[idx+1];
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset <<= 8;
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                }
                                p->idx += 7;
                        } else {
                                if (p->family >= CHIP_RV770) {
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                        ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
                                        p->idx += 5;
                                } else {
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                DRM_ERROR("bad DMA_PACKET_WRITE\n");
                                return -EINVAL;
                        }
-                       dst_offset = ib[idx+1];
-                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       dst_offset = radeon_get_ib_value(p, idx+1);
+                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
                                         dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
index 1d8ff2f850ba0d6d4a4a47c68913bc4cbb4392e7..93f760e27a9200a81b94dfee8ce6d14ac935b49a 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 
index 44420fca7dfa1c1dc13d66b709e2f66496aada15..8be35c809c7b612c5a33bc5271051273bd58cd92 100644 (file)
@@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
 
-       fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+       fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;
 
@@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);
 
-       fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       spin_lock(&bdev->fence_lock);
+       if (bo->sync_obj)
+               fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       else
+               fbo->sync_obj = NULL;
+       spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
@@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 */
 
                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-
-               /* ttm_buffer_object_transfer accesses bo->sync_obj */
-               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
 
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;
 
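The transfer fix tightens two things: the ghost object is kmalloc'ed and then fully initialised field by field, and bo->sync_obj is only sampled and referenced under bdev->fence_lock, since the fence signalling path may clear it concurrently. The locking rule, distilled:

        void *sync;

        /* Sample the pointer and take the reference inside one critical
         * section; outside fence_lock the pointer may already be stale. */
        spin_lock(&bdev->fence_lock);
        sync = bo->sync_obj ? driver->sync_obj_ref(bo->sync_obj) : NULL;
        spin_unlock(&bdev->fence_lock);
        fbo->sync_obj = sync;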
index 358cd7ee905b7ff4f9a7498e277341037437bf19..7cd74e29cbc87a6495277ecd74d7135ebcd75ad3 100644 (file)
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x, y)    ((x) - (y))
 #define TIME_NAME      "TSC"
index 1877ed7ca0864ed4adfb614de862ed40dde730bd..1c9e09fbdff8346e99413e0313d03ed401f463e2 100644 (file)
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
                pr_info("%s: Setting primary slave to None.\n",
                        bond->dev->name);
                bond->primary_slave = NULL;
+               memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
                goto out;
        }
index 58607f196c9ee5e5c3d214c70ca9abe0e2a5b6d0..2282b1ae97653d825fe6f37c2b22cf7ecd37a945 100644 (file)
@@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
        priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
                        IFX_WRITE_LOW_16BIT(mask));
+
+       /* According to the C_CAN documentation, the reserved bit
+        * in the IFx_MASK2 register must always be written as 1
+        */
        priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-                       IFX_WRITE_HIGH_16BIT(mask));
+                       IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
        priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
                        IFX_WRITE_LOW_16BIT(id));
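Hardware-mandated reserved bits are easiest to keep correct behind a tiny helper that ORs them in on every write. A hedged sketch; BIT(13) and the IFX macro come from the patch, the helper itself is ours:

        #include <linux/bitops.h>

        /* IFx_MASK2 bit 13 is reserved and must be written as 1. */
        static u16 c_can_if_mask2(u32 mask)
        {
                return IFX_WRITE_HIGH_16BIT(mask) | BIT(13);
        }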
index 4eba17b83ba8dae6ce466dd3eecbc957a1c25d0d..f1b3df167ff29a8c68fccce991e43f714d82daf3 100644 (file)
 
 #define DRV_VER                        "4.4.161.0u"
 #define DRV_NAME               "be2net"
-#define BE_NAME                        "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME               "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME                        "Emulex BladeEngine2"
+#define BE3_NAME               "Emulex BladeEngine3"
+#define OC_NAME                        "Emulex OneConnect"
 #define OC_NAME_BE             OC_NAME "(be3)"
 #define OC_NAME_LANCER         OC_NAME "(Lancer)"
 #define OC_NAME_SH             OC_NAME "(Skyhawk)"
-#define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC               "Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
 #define EMULEX_VENDOR_ID       0x10df
index 5c995700e53440331a9fe4cef3a8f68886fae466..4d6f3c54427a903f34189fb7abe74795028fb46f 100644 (file)
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
index 02a12b69555f6fb5d71fe2089992f5fad5e354cc..4dab6fc265a27450a26664e1417c804961358a4a 100644 (file)
 #define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE     0x00080000  /* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
 
 #define E1000_PBS_16K E1000_PBA_16K
 
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK       0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK     0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT    8
+#define E1000_PBECCSTS_ECC_ENABLE              0x00010000
+
 #define IFS_MAX       80
 #define IFS_MIN       40
 #define IFS_RATIO     4
 #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */
index 6782a2eea1bcbfb893681b8d26428d8b0410981b..7e95f221d60b15e58b17d177ee870ecdfd2b0352 100644 (file)
@@ -309,6 +309,8 @@ struct e1000_adapter {
 
        struct napi_struct napi;
 
+       unsigned int uncorr_errors;     /* uncorrectable ECC errors */
+       unsigned int corr_errors;       /* correctable ECC errors */
        unsigned int restart_queue;
        u32 txd_cmd;
 
index f95bc6ee1c227c89a2e158fde491dc03ef173b4b..fd4772a2691c510fb83e5e2c9b9211c55f116465 100644 (file)
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("dropped_smbus", stats.mgpdc),
        E1000_STAT("rx_dma_failed", rx_dma_failed),
        E1000_STAT("tx_dma_failed", tx_dma_failed),
+       E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+       E1000_STAT("corr_ecc_errors", corr_errors),
 };
 
 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
index cf217777586c246561120c0877516f22d3ff6b89..b88676ff3d86d19f365f94e4a9cc973c7ea3f092 100644 (file)
@@ -77,6 +77,7 @@ enum e1e_registers {
 #define E1000_POEMB    E1000_PHY_CTRL  /* PHY OEM Bits */
        E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
        E1000_PBS      = 0x01008, /* Packet Buffer Size */
+       E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
        E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
        E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
        E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
index 976336547607e31caef68c8eaf528ddac78682da..24d9f61956f095ccfd5937ebba5ed35e8c2dc5d0 100644 (file)
@@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
        if (hw->mac.type == e1000_ich8lan)
                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
        ew32(RFCTL, reg);
+
+       /* Enable ECC on Lynxpoint */
+       if (hw->mac.type == e1000_pch_lpt) {
+               reg = er32(PBECCSTS);
+               reg |= E1000_PBECCSTS_ECC_ENABLE;
+               ew32(PBECCSTS, reg);
+
+               reg = er32(CTRL);
+               reg |= E1000_CTRL_MEHE;
+               ew32(CTRL, reg);
+       }
 }
 
 /**
index fbf75fdca99422743f35d3d9a114fcdef235563f..643c883dd795a48384c95dcd0795f3bbe95f6291 100644 (file)
@@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+       /* Reset on uncorrectable ECC error */
+       if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+               u32 pbeccsts = er32(PBECCSTS);
+
+               adapter->corr_errors +=
+                   pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+               adapter->uncorr_errors +=
+                   (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+                   E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+               /* Do the reset outside of interrupt context */
+               schedule_work(&adapter->reset_task);
+
+               /* return immediately since reset is imminent */
+               return IRQ_HANDLED;
+       }
+
        if (napi_schedule_prep(&adapter->napi)) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
@@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+       /* Reset on uncorrectable ECC error */
+       if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+               u32 pbeccsts = er32(PBECCSTS);
+
+               adapter->corr_errors +=
+                   pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+               adapter->uncorr_errors +=
+                   (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+                   E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+               /* Do the reset outside of interrupt context */
+               schedule_work(&adapter->reset_task);
+
+               /* return immediately since reset is imminent */
+               return IRQ_HANDLED;
+       }
+
        if (napi_schedule_prep(&adapter->napi)) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
@@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
        if (adapter->msix_entries) {
                ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
                ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+       } else if (hw->mac.type == e1000_pch_lpt) {
+               ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
        } else {
                ew32(IMS, IMS_ENABLE_MASK);
        }
@@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.mgptc += er32(MGTPTC);
        adapter->stats.mgprc += er32(MGTPRC);
        adapter->stats.mgpdc += er32(MGTPDC);
+
+       /* Correctable ECC Errors */
+       if (hw->mac.type == e1000_pch_lpt) {
+               u32 pbeccsts = er32(PBECCSTS);
+               adapter->corr_errors +=
+                   pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+               adapter->uncorr_errors +=
+                   (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+                   E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+       }
 }
 
 /**
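The same PBECCSTS accounting now appears three times (both interrupt handlers and e1000e_update_stats()); a hedged sketch of how the counter extraction could be factored out. The helper name is ours, not part of the driver:

        static void e1000e_count_ecc_errors(struct e1000_adapter *adapter)
        {
                struct e1000_hw *hw = &adapter->hw;
                u32 pbeccsts = er32(PBECCSTS);

                /* Low byte: correctable errors; next byte: uncorrectable. */
                adapter->corr_errors +=
                    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
                adapter->uncorr_errors +=
                    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
                    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
        }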
index 7992b3e05d3dd5a48f7701114a2264d23f3ce4e8..78ace59efd297531001c171d47351c2fe9bb8ef9 100644 (file)
@@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
                                         rp->tx_skbuff[entry]->len,
                                         PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+               dev_kfree_skb(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
@@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
        if (intr_status & IntrPCIErr)
                netif_warn(rp, hw, dev, "PCI error\n");
 
-       napi_disable(&rp->napi);
-       rhine_irq_disable(rp);
-       /* Slow and safe. Consider __napi_schedule as a replacement ? */
-       napi_enable(&rp->napi);
-       napi_schedule(&rp->napi);
+       iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
        mutex_unlock(&rp->task_lock);
index cc09b67c23bcd30402d73a93594f7aef9cb100da..2917a86f4c43ec44a3e18566acd7dd00d071660a 100644 (file)
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-                           u16 queue_index)
+                           struct tun_file *tfile)
 {
        struct hlist_head *head;
        struct tun_flow_entry *e;
        unsigned long delay = tun->ageing_time;
+       u16 queue_index = tfile->queue_index;
 
        if (!rxhash)
                return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
        rcu_read_lock();
 
-       if (tun->numqueues == 1)
+       /* There is a small chance of packet re-ordering while a queue is
+        * being switched; it is not worth optimizing for. */
+       if (tun->numqueues == 1 || tfile->detached)
                goto unlock;
 
        e = tun_flow_find(head, rxhash);
@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
        tun = rtnl_dereference(tfile->tun);
 
-       if (tun) {
+       if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
                dev = tun->dev;
 
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
-               rcu_assign_pointer(tfile->tun, NULL);
                ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;
 
                --tun->numqueues;
-               if (clean)
+               if (clean) {
+                       rcu_assign_pointer(tfile->tun, NULL);
                        sock_put(&tfile->sk);
-               else
+               } else
                        tun_disable_queue(tun, tfile);
 
                synchronize_net();
@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        }
 
        if (clean) {
-               if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-                   !(tun->flags & TUN_PERSIST))
-                       if (tun->dev->reg_state == NETREG_REGISTERED)
+               if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+                       netif_carrier_off(tun->dev);
+
+                       if (!(tun->flags & TUN_PERSIST) &&
+                           tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
+               }
 
                BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
                                 &tfile->socket.flags));
@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
                rcu_assign_pointer(tfile->tun, NULL);
                --tun->numqueues;
        }
+       list_for_each_entry(tfile, &tun->disabled, next) {
+               wake_up_all(&tfile->wq.wait);
+               rcu_assign_pointer(tfile->tun, NULL);
+       }
        BUG_ON(tun->numqueues != 0);
 
        synchronize_net();
@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
                goto out;
 
        err = -EINVAL;
-       if (rtnl_dereference(tfile->tun))
+       if (rtnl_dereference(tfile->tun) && !tfile->detached)
                goto out;
 
        err = -EBUSY;
@@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        tun->dev->stats.rx_packets++;
        tun->dev->stats.rx_bytes += len;
 
-       tun_flow_update(tun, rxhash, tfile->queue_index);
+       tun_flow_update(tun, rxhash, tfile);
        return total_len;
 }
 
@@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
                        pr_err("Failed to create tun sysfs files\n");
-
-               netif_carrier_on(tun->dev);
        }
 
+       netif_carrier_on(tun->dev);
+
        tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
        if (ifr->ifr_flags & IFF_NO_PI)
@@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = tun_attach(tun, file);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
-               if (!tun || !(tun->flags & TUN_TAP_MQ))
+               if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
                        ret = -EINVAL;
                else
                        __tun_detach(tfile, false);
index 9197b2c72ca36dd9e62bbccafbea388d5fc99793..00d3b2d37828f3f81afb6705965e80e128e7bb3d 100644 (file)
@@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
          .driver_info = (unsigned long)&wwan_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+         .driver_info = (unsigned long)&wwan_info,
+       },
 
        /* Infineon(now Intel) HSPA Modem platform */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
index 575a5839ee3458434d355677d785589d3472395d..c8e05e27f38cdca6f5aeb4c30d479e691f15a55c 100644 (file)
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
 
        /* 2. Combined interface devices matching on class+protocol */
        {       /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
+       {       /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
        {       /* Pantech UML290, P4200 and more */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
                .driver_info        = (unsigned long)&qmi_wwan_info,
@@ -461,6 +473,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index f34b2ebee815ff6b832ef280e26ce7039ef6bfd6..5e33606c136639f9b9ad7cf38e7ed8c48c7a51eb 100644 (file)
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
        unsigned long           lockflags;
        size_t                  size = dev->rx_urb_size;
 
+       /* prevent rx skb allocation when error ratio is high */
+       if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+               usb_free_urb(urb);
+               return -ENOLINK;
+       }
+
        skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
                break;
        }
 
+       /* stop rx if packet error rate is high */
+       if (++dev->pkt_cnt > 30) {
+               dev->pkt_cnt = 0;
+               dev->pkt_err = 0;
+       } else {
+               if (state == rx_cleanup)
+                       dev->pkt_err++;
+               if (dev->pkt_err > 20)
+                       set_bit(EVENT_RX_KILL, &dev->flags);
+       }
+
        state = defer_bh(dev, skb, &dev->rxq, state);
 
        if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
                   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
                   "simple");
 
+       /* reset rx error state */
+       dev->pkt_cnt = 0;
+       dev->pkt_err = 0;
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // delay posting reads until we're fully open
        tasklet_schedule (&dev->bh);
        if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (info->tx_fixup) {
                skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
                if (!skb) {
-                       if (netif_msg_tx_err(dev)) {
-                               netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-                               goto drop;
-                       } else {
-                               /* cdc_ncm collected packet; waits for more */
+                       /* packet collected; minidriver waiting for more */
+                       if (info->flags & FLAG_MULTI_PACKET)
                                goto not_drop;
-                       }
+                       netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+                       goto drop;
                }
        }
        length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
                }
        }
 
+       /* restart rx if it was stopped due to a high error rate */
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // waiting for all pending urbs to complete?
        if (dev->wait) {
                if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
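The rx kill switch works over a rolling window: every completed rx packet is counted, and if more than 20 of any 30 consecutive packets end in rx_cleanup, EVENT_RX_KILL is set so rx_submit() stops allocating skbs; the bottom half clears the bit so rx is retried. The window logic, distilled into a helper of our own (constants come from the patch):

        /* Returns true when the error rate over the current 30-packet
         * window crosses the threshold and rx should be killed. */
        static bool rx_window_overflowed(struct usbnet *dev, bool pkt_failed)
        {
                if (++dev->pkt_cnt > 30) {
                        /* Window complete: start counting afresh. */
                        dev->pkt_cnt = 0;
                        dev->pkt_err = 0;
                        return false;
                }
                if (pkt_failed)
                        dev->pkt_err++;
                return dev->pkt_err > 20;
        }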
index dc8913c6238c965b0905ef7ad6784bf669f06388..12c6440d16499ebd4a1faaa4e07edc8658b5f198 100644 (file)
@@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
        if (ret & 1) { /* Link is up. */
                printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
                       adapter->netdev->name, adapter->link_speed);
-               if (!netif_carrier_ok(adapter->netdev))
-                       netif_carrier_on(adapter->netdev);
+               netif_carrier_on(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
        } else {
                printk(KERN_INFO "%s: NIC Link is Down\n",
                       adapter->netdev->name);
-               if (netif_carrier_ok(adapter->netdev))
-                       netif_carrier_off(adapter->netdev);
+               netif_carrier_off(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
        netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+       netif_carrier_off(netdev);
        err = register_netdev(netdev);
 
        if (err) {
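Two related simplifications here: netif_carrier_on()/off() are already no-ops when the state is unchanged, so the netif_carrier_ok() guards can go, and the carrier is forced off before register_netdev() so userspace never observes a phantom link-up between registration and the first real link event. The probe ordering, sketched generically (the error label is illustrative):

        /* Start with no carrier; the first link check will raise it. */
        netif_carrier_off(netdev);

        err = register_netdev(netdev);
        if (err)
                goto err_register;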
index 0f71d1d4339dc38ccfdd89428fdc4a62bb5b7bd9..e5fd20994bec256df4ec88f96182006e54ada0d6 100644 (file)
@@ -36,6 +36,7 @@
 #include "debug.h"
 
 #define N_TX_QUEUES    4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT    500 /* msec */
 
 /* Flags we support */
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
        wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
 }
 
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+       bool result;
+
+       spin_lock_bh(&wl->lock);
+       result = brcms_c_tx_flush_completed(wl->wlc);
+       spin_unlock_bh(&wl->lock);
+       return result;
+}
+
 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct brcms_info *wl = hw->priv;
+       int ret;
 
        no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
 
-       /* wait for packet queue and dma fifos to run empty */
-       spin_lock_bh(&wl->lock);
-       brcms_c_wait_for_tx_completion(wl->wlc, drop);
-       spin_unlock_bh(&wl->lock);
+       ret = wait_event_timeout(wl->tx_flush_wq,
+                                brcms_tx_flush_completed(wl),
+                                msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+       brcms_dbg_mac80211(wl->wlc->hw->d11core,
+                          "ret=%d\n", jiffies_to_msecs(ret));
 }
 
 static const struct ieee80211_ops brcms_ops = {
@@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)
 
  done:
        spin_unlock_bh(&wl->lock);
+       wake_up(&wl->tx_flush_wq);
 }
 
 /*
@@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 
        atomic_set(&wl->callbacks, 0);
 
+       init_waitqueue_head(&wl->tx_flush_wq);
+
        /* setup the bottom half handler */
        tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 
@@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
        spin_lock_bh(&wl->lock);
        return blocked;
 }
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
-       spin_unlock_bh(&wl->lock);
-       msleep(ms);
-       spin_lock_bh(&wl->lock);
-}
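The flush rework replaces sleeping in 1 ms steps under the perimeter lock (brcms_msleep(), now deleted) with a waitqueue: brcms_dpc() wakes tx_flush_wq whenever tx work completes, and .flush() sleeps until brcms_tx_flush_completed() holds or 500 ms pass. The generic shape of the pattern; tx_idle() is an assumed stand-in for the real predicate:

        #include <linux/wait.h>
        #include <linux/jiffies.h>
        #include <linux/printk.h>

        static DECLARE_WAIT_QUEUE_HEAD(demo_flush_wq);

        static void demo_tx_progress(void)      /* e.g. from the tx tasklet */
        {
                wake_up(&demo_flush_wq);
        }

        static void demo_flush(void)
        {
                /* tx_idle(): true once all pending tx has drained. */
                if (!wait_event_timeout(demo_flush_wq, tx_idle(),
                                        msecs_to_jiffies(500)))
                        pr_warn("tx flush timed out\n");
        }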
index 9358bd5ebd35d016058d1238d11aca6880fdae19..947ccacf43e6f498dacfaceed5c3853c00c99d96 100644 (file)
@@ -68,6 +68,8 @@ struct brcms_info {
        spinlock_t lock;        /* per-device perimeter lock */
        spinlock_t isr_lock;    /* per-device ISR synchronization lock */
 
+       /* tx flush */
+       wait_queue_head_t tx_flush_wq;
 
        /* timer related fields */
        atomic_t callbacks;     /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
 extern void brcms_free_timer(struct brcms_timer *timer);
 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
 extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
 extern void brcms_fatal_error(struct brcms_info *wl);
index 17594de4199ef58a30488e2c7579ec02c521d9ea..8b5839008af32a11afef6d4042db67a0d8a9c16d 100644 (file)
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-       bool morepending = false;
        struct bcma_device *core;
        struct tx_status txstatus, *txs;
        u32 s1, s2;
@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
        txs = &txstatus;
        core = wlc_hw->d11core;
        *fatal = false;
-       s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-       while (!(*fatal)
-              && (s1 & TXS_V)) {
-               /* !give others some time to run! */
-               if (n >= max_tx_num) {
-                       morepending = true;
-                       break;
-               }
 
+       while (n < max_tx_num) {
+               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
                if (s1 == 0xffffffff) {
                        brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
                                  __func__);
                        *fatal = true;
                        return false;
                }
-               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+               /* only process when valid */
+               if (!(s1 & TXS_V))
+                       break;
 
+               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
                txs->status = s1 & TXS_STATUS_MASK;
                txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
                txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
                txs->lasttxtime = 0;
 
                *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+               if (*fatal)
+                       return false;
                n++;
        }
 
-       if (*fatal)
-               return false;
-
-       return morepending;
+       return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
        return wlc->band->bandunit;
 }
 
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
 {
-       int timeout = 20;
        int i;
 
        /* Kick DMA to send any pending AMPDU */
        for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
                if (wlc->hw->di[i])
-                       dma_txflush(wlc->hw->di[i]);
-
-       /* wait for queue and DMA fifos to run dry */
-       while (brcms_txpktpendtot(wlc) > 0) {
-               brcms_msleep(wlc->wl, 1);
-
-               if (--timeout == 0)
-                       break;
-       }
+                       dma_kick_tx(wlc->hw->di[i]);
 
-       WARN_ON_ONCE(timeout == 0);
+       return !brcms_txpktpendtot(wlc);
 }
 
 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
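
The brcms_b_txstatus() rework above is an instance of the bounded-drain
idiom: consume at most a fixed budget of hardware status entries per call,
stop early when the hardware has nothing valid left, and tell the caller
whether more work may be pending. A sketch with hypothetical helpers
(read_status(), consume_one() and the STATUS_VALID bit are illustrative,
not driver API):

    static bool drain_tx_status(void __iomem *regs, int budget)
    {
            int n = 0;

            while (n < budget) {
                    u32 s = read_status(regs);      /* hypothetical read */

                    if (!(s & STATUS_VALID))
                            break;                  /* hardware is drained */
                    consume_one(regs, s);           /* hypothetical work */
                    n++;
            }
            /* true means "budget exhausted, reschedule me" */
            return n >= budget;
    }

Reading the status register once per iteration, as the new loop does, also
removes the duplicated pre-loop read that the old code needed.
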
index 4fb2834f4e6483f7c9321666c75598e6f9fa21fd..b0f14b7b8616e63ed5f2ed954ff47eddab7ff0d0 100644 (file)
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
 extern void brcms_c_scan_start(struct brcms_c_info *wlc);
 extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
 extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
-                                          bool drop);
 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
 
 #endif                         /* _BRCM_PUB_H_ */
index 31534f7c05488ba38d4c65c7ce0209a43712fd51..279796419ea0ca036800483aad59321e881efbe5 100644 (file)
@@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        next_reclaimed = ssn;
                }
 
+               if (tid != IWL_TID_NON_QOS) {
+                       priv->tid_data[sta_id][tid].next_reclaimed =
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                                 next_reclaimed);
+               }
+
                iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
                iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        if (!is_agg)
                                iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
-                       /*
-                        * W/A for FW bug - the seq_ctl isn't updated when the
-                        * queues are flushed. Fetch it from the packet itself
-                        */
-                       if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
-                               next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
-                               next_reclaimed =
-                                       SEQ_TO_SN(next_reclaimed + 0x10);
-                       }
-
                        is_offchannel_skb =
                                (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
                        freed++;
                }
 
-               if (tid != IWL_TID_NON_QOS) {
-                       priv->tid_data[sta_id][tid].next_reclaimed =
-                               next_reclaimed;
-                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
-                                          next_reclaimed);
-               }
-
                WARN_ON(!is_agg && freed != 1);
 
                /*
index 9189a32b7844b713815636d03517e7532da31f94..973a9d90e9ea06542fe6b7e9169c0de6d4922531 100644 (file)
@@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
                        scan_rsp->number_of_sets);
                ret = -1;
-               goto done;
+               goto check_next_scan;
        }
 
        bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                if (!beacon_size || beacon_size > bytes_left) {
                        bss_info += bytes_left;
                        bytes_left = 0;
-                       return -1;
+                       ret = -1;
+                       goto check_next_scan;
                }
 
                /* Initialize the current working beacon pointer for this BSS
@@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                dev_err(priv->adapter->dev,
                                        "%s: bytes left < IE length\n",
                                        __func__);
-                               goto done;
+                               goto check_next_scan;
                        }
                        if (element_id == WLAN_EID_DS_PARAMS) {
                                channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
+check_next_scan:
        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
        if (list_empty(&adapter->scan_pending_q)) {
                spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
-done:
        return ret;
 }
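
The scan.c changes above converge every failure path on one label so the
pending-scan queue is always serviced before returning; the early returns
that previously skipped it are what stalled queued scans. A sketch of the
single-exit shape, with hypothetical helpers:

    static int handle_scan_response(struct scan_ctx *ctx)
    {
            int ret = 0;

            if (!header_valid(ctx)) {               /* hypothetical check */
                    ret = -1;
                    goto check_next_scan;           /* no early return */
            }

            ret = parse_bss_entries(ctx);           /* hypothetical parser */

    check_next_scan:
            service_pending_scans(ctx);             /* runs on every path */
            return ret;
    }
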
 
index 4494d130b37cb0d0ad585eea5142e0c312c86b26..0f8b05185edaeacb6ef686e5e5a1ca0151866916 100644 (file)
@@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                                         is_tx ? "Tx" : "Rx");
 
                                if (is_tx) {
-                                       rtl_lps_leave(hw);
+                                       schedule_work(&rtlpriv->
+                                                     works.lps_leave_work);
                                        ppsc->last_delaylps_stamp_jiffies =
                                            jiffies;
                                }
@@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                }
        } else if (ETH_P_ARP == ether_type) {
                if (is_tx) {
-                       rtl_lps_leave(hw);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                        ppsc->last_delaylps_stamp_jiffies = jiffies;
                }
 
@@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                         "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
 
                if (is_tx) {
-                       rtl_lps_leave(hw);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                        ppsc->last_delaylps_stamp_jiffies = jiffies;
                }
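
rtl_lps_leave() may sleep, but rtl_is_special_data() runs in the tx/rx hot
path, which can be atomic context; the fix defers the call to a workqueue.
A minimal sketch of the deferral pattern (the struct name and leave_lps()
are illustrative):

    #include <linux/workqueue.h>

    struct lps_priv {
            struct work_struct lps_leave_work;
    };

    static void lps_leave_work_fn(struct work_struct *w)
    {
            struct lps_priv *p = container_of(w, struct lps_priv,
                                              lps_leave_work);

            leave_lps(p);           /* hypothetical; safe to sleep here */
    }

    static void lps_priv_init(struct lps_priv *p)
    {
            INIT_WORK(&p->lps_leave_work, lps_leave_work_fn);
    }

    static void hot_path(struct lps_priv *p)
    {
            schedule_work(&p->lps_leave_work);      /* never sleeps */
    }
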
 
index f2ecdeb3a90d441ee809af029a1db1c8dab80f48..1535efda3d525a0dfafe54bb4442aacec4b00446 100644 (file)
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        WARN_ON(skb_queue_empty(&rx_queue));
        while (!skb_queue_empty(&rx_queue)) {
                _skb = skb_dequeue(&rx_queue);
-               _rtl_usb_rx_process_agg(hw, skb);
-               ieee80211_rx_irqsafe(hw, skb);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
        }
 }
 
index 94b79c3338c4260200306f180ff053b87006faa6..9d7f1723dd8f750126337d7792a0614fea8ca601 100644 (file)
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
index b7d41f8c338a8372cea8bfe6649386c6a0d8b090..b8c5193bd420030a363ba30c14e8b485a8d6c5cc 100644 (file)
@@ -343,17 +343,22 @@ err:
        return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
        struct net_device *dev = vif->dev;
-       if (netif_carrier_ok(dev)) {
-               rtnl_lock();
-               netif_carrier_off(dev); /* discard queued packets */
-               if (netif_running(dev))
-                       xenvif_down(vif);
-               rtnl_unlock();
-               xenvif_put(vif);
-       }
+
+       rtnl_lock();
+       netif_carrier_off(dev); /* discard queued packets */
+       if (netif_running(dev))
+               xenvif_down(vif);
+       rtnl_unlock();
+       xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+       if (netif_carrier_ok(vif->dev))
+               xenvif_carrier_off(vif);
 
        atomic_dec(&vif->refcnt);
        wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
index f2d6b78d901d92d3965fe3bd849751f04fb7ccf3..2b9520c46e97a8de263b10f602bdb8cc35af0277 100644 (file)
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
        atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status);
 static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8       st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
        do {
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               if (cons >= end)
+               if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
        } while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
        xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+       netdev_err(vif->dev, "fatal error; disabling device\n");
+       xenvif_carrier_off(vif);
+       xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
                                struct xen_netif_tx_request *first,
                                struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
        do {
                if (frags >= work_to_do) {
-                       netdev_dbg(vif->dev, "Need more frags\n");
+                       netdev_err(vif->dev, "Need more frags\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
                if (unlikely(frags >= MAX_SKB_FRAGS)) {
-                       netdev_dbg(vif->dev, "Too many frags\n");
+                       netdev_err(vif->dev, "Too many frags\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
                       sizeof(*txp));
                if (txp->size > first->size) {
-                       netdev_dbg(vif->dev, "Frags galore\n");
+                       netdev_err(vif->dev, "Frag is bigger than frame.\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
                frags++;
 
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+                       netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
                                 txp->offset, txp->size);
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
        } while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                pending_idx = netbk->pending_ring[index];
                page = xen_netbk_alloc_page(netbk, skb, pending_idx);
                if (!page)
-                       return NULL;
+                       goto err;
 
                gop->source.u.ref = txp->gref;
                gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
        }
 
        return gop;
+err:
+       /* Unwind, freeing all pages and sending error responses. */
+       while (i-- > start) {
+               xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+                                     XEN_NETIF_RSP_ERROR);
+       }
+       /* The head too, if necessary. */
+       if (start)
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+       return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
        struct gnttab_copy *gop = *gopp;
        u16 pending_idx = *((u16 *)skb->data);
-       struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-       struct xenvif *vif = pending_tx_info[pending_idx].vif;
-       struct xen_netif_tx_request *txp;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
 
        /* Check status of header. */
        err = gop->status;
-       if (unlikely(err)) {
-               pending_ring_idx_t index;
-               index = pending_index(netbk->pending_prod++);
-               txp = &pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
-       }
+       if (unlikely(err))
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
        for (i = start; i < nr_frags; i++) {
                int j, newerr;
-               pending_ring_idx_t index;
 
                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xen_netbk_idx_release(netbk, pending_idx);
+                               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                        continue;
                }
 
                /* Error on this fragment: respond to client with an error. */
-               txp = &netbk->pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               index = pending_index(netbk->pending_prod++);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
                /* Take an extra reference to offset xen_netbk_idx_release */
                get_page(netbk->mmap_pages[pending_idx]);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
        }
 }
 
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
        do {
                if (unlikely(work_to_do-- <= 0)) {
-                       netdev_dbg(vif->dev, "Missing extra info\n");
+                       netdev_err(vif->dev, "Missing extra info\n");
+                       netbk_fatal_tx_err(vif);
                        return -EBADR;
                }
 
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        vif->tx.req_cons = ++cons;
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
+                       netbk_fatal_tx_err(vif);
                        return -EINVAL;
                }
 
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
                             struct xen_netif_extra_info *gso)
 {
        if (!gso->u.gso.size) {
-               netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+               netdev_err(vif->dev, "GSO size must not be zero.\n");
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-               netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* Get a netif from the list with work to do. */
                vif = poll_net_schedule_list(netbk);
+               /* This can sometimes happen because the test of
+                * list_empty(net_schedule_list) at the top of the
+                * loop is unlocked.  Just go back and have another
+                * look.
+                */
                if (!vif)
                        continue;
 
+               if (vif->tx.sring->req_prod - vif->tx.req_cons >
+                   XEN_NETIF_TX_RING_SIZE) {
+                       netdev_err(vif->dev,
+                                  "Impossible number of requests. "
+                                  "req_prod %d, req_cons %d, size %ld\n",
+                                  vif->tx.sring->req_prod, vif->tx.req_cons,
+                                  XEN_NETIF_TX_RING_SIZE);
+                       netbk_fatal_tx_err(vif);
+                       continue;
+               }
+
                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
                if (!work_to_do) {
                        xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        work_to_do = xen_netbk_get_extras(vif, extras,
                                                          work_to_do);
                        idx = vif->tx.req_cons;
-                       if (unlikely(work_to_do < 0)) {
-                               netbk_tx_err(vif, &txreq, idx);
+                       if (unlikely(work_to_do < 0))
                                continue;
-                       }
                }
 
                ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-               if (unlikely(ret < 0)) {
-                       netbk_tx_err(vif, &txreq, idx - ret);
+               if (unlikely(ret < 0))
                        continue;
-               }
+
                idx += ret;
 
                if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
-                       netbk_tx_err(vif, &txreq, idx);
+                       netbk_fatal_tx_err(vif);
                        continue;
                }
 
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
                        if (netbk_set_skb_gso(vif, skb, gso)) {
+                               /* Failure in netbk_set_skb_gso is fatal. */
                                kfree_skb(skb);
-                               netbk_tx_err(vif, &txreq, idx);
                                continue;
                        }
                }
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
        xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status)
 {
        struct xenvif *vif;
        struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
        vif = pending_tx_info->vif;
 
-       make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+       make_tx_response(vif, &pending_tx_info->req, status);
 
        index = pending_index(netbk->pending_prod++);
        netbk->pending_ring[index] = pending_idx;
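
Two error-handling patterns recur in the netback changes above. First,
xen_netbk_idx_release() now takes the response status, so a single helper
serves both the OK and error paths. Second, xen_netbk_get_requests()
unwinds on failure: everything acquired so far is released in reverse order
with an error status. A sketch of that unwind, with hypothetical helpers:

    static int acquire_slots(struct slot *slots, int start, int count)
    {
            int i;

            for (i = start; i < count; i++) {
                    if (!acquire(&slots[i]))        /* hypothetical */
                            goto err;
            }
            return 0;

    err:
            /* release [start, i) in reverse order */
            while (i-- > start)
                    release(&slots[i], RSP_ERROR);  /* hypothetical */
            return -ENOMEM;
    }
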
index 10c1a3454e48ecce74c595d5cd3a33a7b3f4af1d..81c5077feff32af9724da4e81f674b4d29c524e5 100644 (file)
@@ -350,7 +350,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        /* Enable the clockwatch on ST Variants */
        if (vendor->clockwatch)
                data |= RTC_CR_CWEN;
-       writel(data | RTC_CR_EN, ldata->base + RTC_CR);
+       else
+               data |= RTC_CR_EN;
+       writel(data, ldata->base + RTC_CR);
 
        /*
         * On ST PL031 variants, the RTC reset value does not provide correct
index 97ac0a38e3d0c7277aaab702078d2878db20f1cd..eb2753008ef0550b6c3bfeda984ca9dfe47d26bd 100644 (file)
@@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus)
 
        return -1;
 }
+
+int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+       if (ssb_chipco_available(&bus->chipco) ||
+           ssb_extif_available(&bus->extif)) {
+               return gpiochip_remove(&bus->gpio);
+       } else {
+               SSB_WARN_ON(1);
+       }
+
+       return -1;
+}
index 772ad9b5c304fc986b459c4f4c6c5513e70ecd16..24dc331b4701efd796381882e635c6ef791c45cf 100644 (file)
@@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
 
 void ssb_bus_unregister(struct ssb_bus *bus)
 {
+       int err;
+
+       err = ssb_gpio_unregister(bus);
+       if (err == -EBUSY)
+               ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+       else if (err)
+               ssb_dprintk(KERN_ERR PFX
+                           "Can not unregister GPIO driver: %i\n", err);
+
        ssb_buses_lock();
        ssb_devices_unregister(bus);
        list_del(&bus->list);
index 6c10b66c796cf7ee23ae3e9334086034198e911c..da38305a2d22a2c24de7b4563da7ed0daa7354a0 100644 (file)
@@ -252,11 +252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif)
 
 #ifdef CONFIG_SSB_DRIVER_GPIO
 extern int ssb_gpio_init(struct ssb_bus *bus);
+extern int ssb_gpio_unregister(struct ssb_bus *bus);
 #else /* CONFIG_SSB_DRIVER_GPIO */
 static inline int ssb_gpio_init(struct ssb_bus *bus)
 {
        return -ENOTSUPP;
 }
+static inline int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+       return 0;
+}
 #endif /* CONFIG_SSB_DRIVER_GPIO */
 
 #endif /* LINUX_SSB_PRIVATE_H_ */
index ebd08b21b23432696047487367d44f76d4fa4f02..959b1cd89e6a5be5a402a79089077609f8e30716 100644 (file)
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
 }
 
 /* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
 {
+       int ret;
+
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
-               return;
-       vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
-       net->tx_poll_state = VHOST_NET_POLL_STARTED;
+               return 0;
+       ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+       if (!ret)
+               net->tx_poll_state = VHOST_NET_POLL_STARTED;
+       return ret;
 }
 
 /* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
 }
 
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
 {
        struct socket *sock;
+       int ret;
 
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
-               return;
+               return 0;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
-               tx_poll_start(n, sock);
+               ret = tx_poll_start(n, sock);
        } else
-               vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+               ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+       return ret;
 }
 
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }
-               oldubufs = vq->ubufs;
-               vq->ubufs = ubufs;
+
                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
-               vhost_net_enable_vq(n, vq);
-
                r = vhost_init_used(vq);
                if (r)
-                       goto err_vq;
+                       goto err_used;
+               r = vhost_net_enable_vq(n, vq);
+               if (r)
+                       goto err_used;
+
+               oldubufs = vq->ubufs;
+               vq->ubufs = ubufs;
 
                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        mutex_unlock(&n->dev.mutex);
        return 0;
 
+err_used:
+       rcu_assign_pointer(vq->private_data, oldsock);
+       vhost_net_enable_vq(n, vq);
+       if (ubufs)
+               vhost_ubuf_put_and_wait(ubufs);
 err_ubufs:
        fput(sock->file);
 err_vq:
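
The vhost_net_set_backend() reordering above follows a commit-last rule:
the steps that can fail (vhost_init_used(), vhost_net_enable_vq()) run
before any bookkeeping is swapped, so the error path can still restore the
old socket. A condensed sketch of the shape (helper names are illustrative):

    static int set_backend(struct vq *vq, void *newsock, void *oldsock)
    {
            int r;

            disable(vq);
            publish(vq, newsock);           /* rcu_assign_pointer() above */

            r = init_used(vq);
            if (r)
                    goto err_used;
            r = enable(vq);
            if (r)
                    goto err_used;

            swap_ubuf_tracking(vq);         /* commit only after success */
            return 0;

    err_used:
            publish(vq, oldsock);           /* roll back to the old backend */
            enable(vq);
            return r;
    }
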
index 34389f75fe65693a4ad5baa36715159fd5759bbb..9759249e6d908867cf72f53e5bb8e4ecc30db8ae 100644 (file)
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
+       poll->wqh = NULL;
 
        vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
        unsigned long mask;
+       int ret = 0;
 
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+       if (mask & POLLERR) {
+               if (poll->wqh)
+                       remove_wait_queue(poll->wqh, &poll->wait);
+               ret = -EINVAL;
+       }
+
+       return ret;
 }
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
-       remove_wait_queue(poll->wqh, &poll->wait);
+       if (poll->wqh) {
+               remove_wait_queue(poll->wqh, &poll->wait);
+               poll->wqh = NULL;
+       }
 }
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                fput(filep);
 
        if (pollstart && vq->handle_kick)
-               vhost_poll_start(&vq->poll, vq->kick);
+               r = vhost_poll_start(&vq->poll, vq->kick);
 
        mutex_unlock(&vq->mutex);
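
The vhost poll changes above hang together: vhost_poll_start() can now fail
(a POLLERR-reporting file must not stay polled), so it returns an error, and
vhost_poll_stop() keys off poll->wqh so that stopping a poll that never
started, or stopping twice, is safe. In the real code poll->wqh is recorded
by the poll-table callback during f_op->poll(); the sketch below assumes
that and shows only the guard logic:

    static int poll_start(struct vhost_poll *poll, struct file *file)
    {
            unsigned long mask = file->f_op->poll(file, &poll->table);

            if (mask & POLLERR) {
                    if (poll->wqh)  /* registered before the error? undo it */
                            remove_wait_queue(poll->wqh, &poll->wait);
                    return -EINVAL; /* caller must handle the failure */
            }
            return 0;
    }

    static void poll_stop(struct vhost_poll *poll)
    {
            if (poll->wqh) {        /* idempotent: NULL means not started */
                    remove_wait_queue(poll->wqh, &poll->wait);
                    poll->wqh = NULL;
            }
    }
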
 
index 2639c58b23ab497ace850895f3322df661a965df..17261e277c022abbffe8effc6a8492336ea7edef 100644 (file)
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
index 5de7a220e98680f9c9cfc7b36c1f5c2114d7144f..0e5ac93bab101bea0526ee3b54f52ed3b0f72099 100644 (file)
@@ -33,6 +33,7 @@ struct usbnet {
        wait_queue_head_t       *wait;
        struct mutex            phy_mutex;
        unsigned char           suspend_count;
+       unsigned char           pkt_cnt, pkt_err;
 
        /* i/o info: pipes etc */
        unsigned                in, out;
@@ -70,6 +71,7 @@ struct usbnet {
 #              define EVENT_DEV_OPEN   7
 #              define EVENT_DEVICE_REPORT_IDLE 8
 #              define EVENT_NO_RUNTIME_PM      9
+#              define EVENT_RX_KILL    10
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -100,7 +102,6 @@ struct driver_info {
 #define FLAG_LINK_INTR 0x0800          /* updates link (carrier) status */
 
 #define FLAG_POINTTOPOINT 0x1000       /* possibly use "usb%d" names */
-#define FLAG_NOARP     0x2000          /* device can't do ARP */
 
 /*
  * Indicates to usbnet, that USB driver accumulates multiple IP packets.
@@ -108,6 +109,7 @@ struct driver_info {
  */
 #define FLAG_MULTI_PACKET      0x2000
 #define FLAG_RX_ASSEMBLE       0x4000  /* rx packets may span >1 frames */
+#define FLAG_NOARP             0x8000  /* device can't do ARP */
 
        /* init device ... can sleep, or cause probe() failure */
        int     (*bind)(struct usbnet *, struct usb_interface *);
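
The usbnet.h hunk fixes a flag collision: FLAG_NOARP had been added as
0x2000, the value already used by FLAG_MULTI_PACKET, so every no-ARP device
silently tested as a multi-packet one. A standalone demonstration of why two
flags must not share a bit (userspace C, values copied from the header):

    #include <stdio.h>

    #define FLAG_MULTI_PACKET 0x2000
    #define FLAG_NOARP_OLD    0x2000        /* the colliding value */
    #define FLAG_NOARP_NEW    0x8000        /* the fixed value */

    int main(void)
    {
            unsigned int flags = FLAG_NOARP_OLD;

            /* prints 1: a no-ARP device wrongly tests as multi-packet */
            printf("%d\n", !!(flags & FLAG_MULTI_PACKET));

            flags = FLAG_NOARP_NEW;
            /* prints 0: each flag now owns its own bit */
            printf("%d\n", !!(flags & FLAG_MULTI_PACKET));
            return 0;
    }
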
index 498433dd067dd64eb6c728e566e61585093e960f..938b7fd1120477213888f9c7e61f98039bea3e03 100644 (file)
@@ -34,17 +34,17 @@ extern int                          udpv6_connect(struct sock *sk,
                                                      struct sockaddr *uaddr,
                                                      int addr_len);
 
-extern int                     datagram_recv_ctl(struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct sk_buff *skb);
-
-extern int                     datagram_send_ctl(struct net *net,
-                                                 struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct flowi6 *fl6,
-                                                 struct ipv6_txoptions *opt,
-                                                 int *hlimit, int *tclass,
-                                                 int *dontfrag);
+extern int                     ip6_datagram_recv_ctl(struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct sk_buff *skb);
+
+extern int                     ip6_datagram_send_ctl(struct net *net,
+                                                     struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct flowi6 *fl6,
+                                                     struct ipv6_txoptions *opt,
+                                                     int *hlimit, int *tclass,
+                                                     int *dontfrag);
 
 #define                LOOPBACK4_IPV6          cpu_to_be32(0x7f000006)
 
index de9af600006f8cd73020b7d25b3dc4fc28be5eab..f2c6a68250989d4223b788bd368b6dd7a80e4e7d 100644 (file)
@@ -331,7 +331,7 @@ out:
        return pid;
 
 out_unlock:
-       spin_unlock(&pidmap_lock);
+       spin_unlock_irq(&pidmap_lock);
 out_free:
        while (++i <= ns->level)
                free_pidmap(pid->numbers + i);
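
The pid.c fix restores the rule that an unlock must use the same variant as
the lock that took it: pidmap_lock is taken with spin_lock_irq() on this
path, so releasing it with plain spin_unlock() would have returned to the
caller with local interrupts still disabled. Minimal sketch of the
invariant (the failure condition is hypothetical):

    static int alloc_example(void)
    {
            spin_lock_irq(&pidmap_lock);
            if (!try_allocate())                    /* hypothetical */
                    goto out_unlock;
            spin_unlock_irq(&pidmap_lock);
            return 0;

    out_unlock:
            spin_unlock_irq(&pidmap_lock);  /* must match spin_lock_irq() */
            return -ENOMEM;
    }
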
index 09255ec8159c459624e66ee5ac17ca092d2fcd6b..fbb60b103e64b11021475a749b86da6f7eb7137e 100644 (file)
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
-       }
+       } else
+               s->memcg_params->is_root_cache = true;
+
        return 0;
 }
 
index f0b9ce572fc78ddbd0118db8aaa60d9b56086bf9..c9bd528b01d2361aa6e37f9791a831c3b2a443e7 100644 (file)
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 static int do_mlockall(int flags)
 {
        struct vm_area_struct * vma, * prev = NULL;
-       unsigned int def_flags = 0;
 
        if (flags & MCL_FUTURE)
-               def_flags = VM_LOCKED;
-       current->mm->def_flags = def_flags;
+               current->mm->def_flags |= VM_LOCKED;
+       else
+               current->mm->def_flags &= ~VM_LOCKED;
        if (flags == MCL_FUTURE)
                goto out;
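
A quick worked example of why do_mlockall() now uses a read-modify-write:
suppose def_flags already carries some other default bit, call it OTHER
(hypothetical value 0x8), alongside VM_LOCKED (0x2000):

    def_flags = 0x2008                          (OTHER | VM_LOCKED)
    old code, no MCL_FUTURE: def_flags = 0          -> OTHER silently lost
    new code: def_flags &= ~0x2000 -> def_flags = 0x8 -> OTHER preserved

Only the VM_LOCKED bit is set or cleared; unrelated default flags survive.
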
 
index df2022ff0c8a1d9fb7ff13ed4cf88058999485e6..9673d96b1ba72ab5d3bff286e484adde0edd203d 100644 (file)
@@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
        set_pageblock_migratetype(page, MIGRATE_CMA);
        __free_pages(page, pageblock_order);
        totalram_pages += pageblock_nr_pages;
+#ifdef CONFIG_HIGHMEM
+       if (PageHighMem(page))
+               totalhigh_pages += pageblock_nr_pages;
+#endif
 }
 #endif
 
index 25bfce0666ebff70258019ddb1bba9b6f1eca967..4925a02ae7e4db7df4d4cdfc972e68375743861c 100644 (file)
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
        __u8 reason = hci_proto_disconn_ind(conn);
 
        switch (conn->type) {
-       case ACL_LINK:
-               hci_acl_disconn(conn, reason);
-               break;
        case AMP_LINK:
                hci_amp_disconn(conn, reason);
                break;
+       default:
+               hci_acl_disconn(conn, reason);
+               break;
        }
 }
 
index 68a9587c96945cdd3264cc310b68b7be95cfd953..5abefb12891d5144f97d1feacb0803937b0c9765 100644 (file)
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
        skb_pull(skb, sizeof(code));
 
+       /*
+        * The SMP context must be initialized for all PDUs except
+        * pairing and security requests. If we get any other PDU when
+        * not initialized, simply disconnect (done if this function
+        * returns an error).
+        */
+       if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+           !conn->smp_chan) {
+               BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+               kfree_skb(skb);
+               return -ENOTSUPP;
+       }
+
        switch (code) {
        case SMP_CMD_PAIRING_REQ:
                reason = smp_cmd_pairing_req(conn, skb);
index b29dacf900f9496a42a565a3b2c48ad9cf502c74..e6e1cbe863f57cc18dfc3e25ed2eeffca45e618b 100644 (file)
@@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                        return -EFAULT;
                i += len;
                mutex_lock(&pktgen_thread_lock);
-               pktgen_add_device(t, f);
+               ret = pktgen_add_device(t, f);
                mutex_unlock(&pktgen_thread_lock);
-               ret = count;
-               sprintf(pg_result, "OK: add_device=%s", f);
+               if (!ret) {
+                       ret = count;
+                       sprintf(pg_result, "OK: add_device=%s", f);
+               } else
+                       sprintf(pg_result, "ERROR: can not add device %s", f);
                goto out;
        }
 
index a9a2ae3e2213a3769bd79df65229822768c09264..32443ebc3e890532810b94c6196b182ccdf558d4 100644 (file)
@@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        new->inner_transport_header = old->inner_transport_header;
-       new->inner_network_header = old->inner_transport_header;
+       new->inner_network_header = old->inner_network_header;
        skb_dst_copy(new, old);
        new->rxhash             = old->rxhash;
        new->ooo_okay           = old->ooo_okay;
index 291f2ed7cc311649a72700d65098b6d70ba338b5..cdf2e707bb100500511d6b2d46ece69e19163cfa 100644 (file)
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
        int cnt; /* increase in packets */
        unsigned int delta = 0;
+       u32 snd_cwnd = tp->snd_cwnd;
+
+       if (unlikely(!snd_cwnd)) {
+               pr_err_once("snd_cwnd is nul, please report this bug.\n");
+               snd_cwnd = 1U;
+       }
 
        /* RFC3465: ABC Slow start
         * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
        if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                cnt = sysctl_tcp_max_ssthresh >> 1;     /* limited slow start */
        else
-               cnt = tp->snd_cwnd;                     /* exponential increase */
+               cnt = snd_cwnd;                         /* exponential increase */
 
        /* RFC3465: ABC
         * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
        tp->bytes_acked = 0;
 
        tp->snd_cwnd_cnt += cnt;
-       while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-               tp->snd_cwnd_cnt -= tp->snd_cwnd;
+       while (tp->snd_cwnd_cnt >= snd_cwnd) {
+               tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
-       tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+       tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
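
A worked pass through the bookkeeping above, assuming limited slow start is
not in effect so cnt == snd_cwnd, with snd_cwnd = 4 and snd_cwnd_cnt = 0:

    snd_cwnd_cnt += 4         -> 4
    while loop: 4 >= 4, so delta = 1, snd_cwnd_cnt -> 0
    snd_cwnd = min(4 + 1, snd_cwnd_clamp) = 5

Each ACK therefore grows the window by one segment, which doubles it per
round trip. The snapshot-and-clamp matters because a (buggy) snd_cwnd of 0
would make "while (tp->snd_cwnd_cnt >= tp->snd_cwnd)" spin forever: the
counter is unsigned, so it can never drop below zero, and subtracting zero
never terminates the loop. Clamping the local copy to 1 turns that state
into a loud pr_err_once() instead of a lockup.
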
 
index 18f97ca76b00223b25d0f6faed8a829c45fb95c8..ad70a962c20e1daa7637a190d1c81208d6656263 100644 (file)
@@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
                }
        } else {
                if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       if (!tcp_packets_in_flight(tp)) {
+                               tcp_enter_frto_loss(sk, 2, flag);
+                               return true;
+                       }
+
                        /* Prevent sending of new data. */
                        tp->snd_cwnd = min(tp->snd_cwnd,
                                           tcp_packets_in_flight(tp));
@@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
         * the remote receives only the retransmitted (regular) SYNs: either
         * the original SYN-data or the corresponding SYN-ACK is lost.
         */
-       syn_drop = (cookie->len <= 0 && data &&
-                   inet_csk(sk)->icsk_retransmits);
+       syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
        tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
index 70b09ef2463b3678c5da4a1d5fc0edb4656e4940..eadb693eef55dffdaacf538b36ced145b09fa191 100644 (file)
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -1500,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
@@ -1666,6 +1669,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
index 420e563263840442a150c13098148921559c3f2d..1b5d8cb9b123dff17c85d59f6d1c8e44cfdefa4e 100644 (file)
@@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
        if (dev->addr_len != IEEE802154_ADDR_LEN)
                return -1;
        memcpy(eui, dev->dev_addr, 8);
+       eui[0] ^= 2;
        return 0;
 }
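
The single added line, eui[0] ^= 2, flips the universal/local bit when an
IEEE 802.15.4 EUI-64 is turned into an IPv6 interface identifier, as the
modified EUI-64 format requires (RFC 4291, Appendix A). A tiny standalone
illustration:

    #include <stdio.h>

    int main(void)
    {
            /* first byte of a globally unique (u-bit = 0) EUI-64 */
            unsigned char eui0 = 0x00;

            eui0 ^= 0x02;           /* invert the universal/local bit */
            printf("%02x\n", eui0); /* prints 02 */
            return 0;
    }
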
 
index 8edf2601065af07790500809b8dee9fad0fc8eaa..7a778b9a7b859586219a37c7f80bd6ffeefe27c6 100644 (file)
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
-                               datagram_recv_ctl(sk, msg, skb);
+                               ip6_datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin->sin6_scope_id = IP6CB(skb)->iif;
                } else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                         struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = IP6CB(skb);
@@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
-                     struct msghdr *msg, struct flowi6 *fl6,
-                     struct ipv6_txoptions *opt,
-                     int *hlimit, int *tclass, int *dontfrag)
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+                         struct msghdr *msg, struct flowi6 *fl6,
+                         struct ipv6_txoptions *opt,
+                         int *hlimit, int *tclass, int *dontfrag)
 {
        struct in6_pktinfo *src_info;
        struct cmsghdr *cmsg;
@@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
        return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
index 29124b7a04c8dfb335df69014b5348e22b3fb6ab..d6de4b447250b752ec810c5e21b07f667ef95a8c 100644 (file)
@@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                msg.msg_control = (void*)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-                                       &junk, &junk);
+               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+                                           &junk, &junk, &junk);
                if (err)
                        goto done;
                err = -EINVAL;
index c727e471275199ef27039f79b95755803435211a..131dd097736d174800cff6c9d554d274a4951884 100644 (file)
@@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
        int ret;
 
        if (!ip6_tnl_xmit_ctl(t))
-               return -1;
+               goto tx_err;
 
        switch (skb->protocol) {
        case htons(ETH_P_IP):
index ee94d31c9d4d494cdfe2dce1ac812e4e094fe65f..d1e2e8ef29c54abd86064728ec145f0478f360af 100644 (file)
@@ -476,8 +476,8 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
-                                        &junk);
+               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+                                            &junk, &junk);
                if (retv)
                        goto done;
 update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                release_sock(sk);
 
                if (skb) {
-                       int err = datagram_recv_ctl(sk, &msg, skb);
+                       int err = ip6_datagram_recv_ctl(sk, &msg, skb);
                        kfree_skb(skb);
                        if (err)
                                return err;
index 6cd29b1e8b926e26a7bc0df5b5c5263fc2e23d84..70fa8144999780cef19f5102247882ba6d24f6f6 100644 (file)
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (np->rxopt.all)
-               datagram_recv_ctl(sk, msg, skb);
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index e229a3bc345dc4138a188282c4ab4f1717882832..363d8b7772e8d23f180a8db6c0ae707225ad8a63 100644 (file)
@@ -928,7 +928,7 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
index 93825dd3a7c070be4dd1e294a155b409525337e2..4f43537197ef805927e84a182ae1e703edb0f2c8 100644 (file)
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
 
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
@@ -1108,6 +1111,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
 }
 
index dfaa29b8b2939c03fef13fa416f6fdaef031bf5d..fb083295ff0bde8029040b3f867be6ca46bf0901 100644 (file)
@@ -443,7 +443,7 @@ try_again:
                        ip_cmsg_recv(msg, skb);
        } else {
                if (np->rxopt.all)
-                       datagram_recv_ctl(sk, msg, skb);
+                       ip6_datagram_recv_ctl(sk, msg, skb);
        }
 
        err = copied;
@@ -1153,8 +1153,8 @@ do_udp_sendmsg:
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 1a9f3723c13cb45b608bbc07fe4a9803df926523..2ac884d0e89bd06ff820073351004509b0838902 100644 (file)
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 
 }
 
+/* Look up the tunnel socket, possibly involving the fs code if the socket is
+ * owned by userspace.  A struct sock returned from this function must be
+ * released using l2tp_tunnel_sock_put once you're done with it.
+ */
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+{
+       int err = 0;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       if (!tunnel)
+               goto out;
+
+       if (tunnel->fd >= 0) {
+               /* Socket is owned by userspace, who might be in the process
+                * of closing it.  Look the socket up using the fd to ensure
+                * consistency.
+                */
+               sock = sockfd_lookup(tunnel->fd, &err);
+               if (sock)
+                       sk = sock->sk;
+       } else {
+               /* Socket is owned by kernelspace */
+               sk = tunnel->sock;
+       }
+
+out:
+       return sk;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
+
+/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup */
+void l2tp_tunnel_sock_put(struct sock *sk)
+{
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       if (tunnel) {
+               if (tunnel->fd >= 0) {
+                       /* Socket is owned by userspace */
+                       sockfd_put(sk->sk_socket);
+               }
+               sock_put(sk);
+       }
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
+
 /* Lookup a session by id in the global session list
  */
 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
        struct udphdr *uh;
        struct inet_sock *inet;
        __wsum csum;
-       int old_headroom;
-       int new_headroom;
        int headroom;
        int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
        int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
         */
        headroom = NET_SKB_PAD + sizeof(struct iphdr) +
                uhlen + hdr_len;
-       old_headroom = skb_headroom(skb);
        if (skb_cow_head(skb, headroom)) {
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }
 
-       new_headroom = skb_headroom(skb);
        skb_orphan(skb);
-       skb->truesize += new_headroom - old_headroom;
-
        /* Setup L2TP header */
        session->build_header(session, __skb_push(skb, hdr_len));
 
@@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
        tunnel->sock = sk;
+       tunnel->fd = fd;
        lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
 
        sk->sk_allocation = GFP_ATOMIC;
@@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
-       int err = 0;
-       struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+       int err = -EBADF;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       sk = l2tp_tunnel_sock_lookup(tunnel);
+       if (!sk)
+               goto out;
+
+       sock = sk->sk_socket;
+       BUG_ON(!sock);
 
        /* Force the tunnel socket to close. This will eventually
         * cause the tunnel to be deleted via the normal socket close
         * mechanisms when userspace closes the tunnel socket.
         */
-       if (sock != NULL) {
-               err = inet_shutdown(sock, 2);
+       err = inet_shutdown(sock, 2);
 
-               /* If the tunnel's socket was created by the kernel,
-                * close the socket here since the socket was not
-                * created by userspace.
-                */
-               if (sock->file == NULL)
-                       err = inet_release(sock);
-       }
+       /* If the tunnel's socket was created by the kernel,
+        * close the socket here since the socket was not
+        * created by userspace.
+        */
+       if (sock->file == NULL)
+               err = inet_release(sock);
 
+       l2tp_tunnel_sock_put(sk);
+out:
        return err;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
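
The l2tp_core.c changes split tunnel-socket access by ownership: a socket
created by userspace is revalidated through its fd with sockfd_lookup(),
which takes a file reference, while a kernel-created socket is used
directly; the put helper must undo exactly what the lookup took. A
condensed sketch of the pairing (struct name and fields are illustrative):

    struct sock *tunnel_sk_get(struct my_tunnel *t)
    {
            if (t->fd >= 0) {
                    int err;
                    struct socket *sock = sockfd_lookup(t->fd, &err);

                    /* holds a reference on sock->file until sockfd_put() */
                    return sock ? sock->sk : NULL;
            }
            return t->sock;         /* kernel-owned, no extra reference */
    }

    void tunnel_sk_put(struct my_tunnel *t, struct sock *sk)
    {
            if (t->fd >= 0)
                    sockfd_put(sk->sk_socket);      /* drop the file ref */
    }

Callers such as l2tp_tunnel_delete() above must treat a NULL return as "the
fd is already being closed" and bail out rather than touch the socket.
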
index 56d583e083a7baf7b0cf707a76882a88701d8025..e62204cad4fe00c91e88f791c7c7896c130249fa 100644 (file)
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
        int (*recv_payload_hook)(struct sk_buff *skb);
        void (*old_sk_destruct)(struct sock *);
        struct sock             *sock;          /* Parent socket */
-       int                     fd;
+       int                     fd;             /* Parent fd, if tunnel socket
+                                                * was created by userspace */
 
        uint8_t                 priv[0];        /* private data */
 };
@@ -228,6 +229,8 @@ out:
        return tunnel;
 }
 
+extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+extern void l2tp_tunnel_sock_put(struct sock *sk);
 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
index 927547171bc7119f57a7b8afecf453ec5eeabf61..8ee4a86ae996ca624e06a1422e4e07a2e957c584 100644
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                            struct msghdr *msg, size_t len, int noblock,
                            int flags, int *addr_len)
 {
-       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                        lsa->l2tp_scope_id = IP6CB(skb)->iif;
        }
 
-       if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+       if (np->rxopt.all)
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        if (flags & MSG_TRUNC)
                copied = skb->len;
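Both fixes in this file correct IPv4-isms on an IPv6 socket: the control-message sender becomes the renamed ip6_datagram_send_ctl(), and received ancillary data is now generated from the IPv6 per-socket state (inet6_sk()->rxopt) via ip6_datagram_recv_ctl() rather than the IPv4 inet->cmsg_flags / ip_cmsg_recv() pair. A sketch of the family split, with an illustrative wrapper name:

	/* Illustrative wrapper (not in-tree) showing the per-family split
	 * this hunk restores: each address family has its own option state
	 * and its own cmsg generator.
	 */
	static void deliver_cmsgs(struct sock *sk, struct msghdr *msg,
				  struct sk_buff *skb, bool is_ipv6)
	{
		if (is_ipv6) {
			if (inet6_sk(sk)->rxopt.all)
				ip6_datagram_recv_ctl(sk, msg, skb);
		} else {
			if (inet_sk(sk)->cmsg_flags)
				ip_cmsg_recv(msg, skb);
		}
	}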
index 286366ef8930a08981f04952d4459f2b9a413071..716605c241f482e4793e86bdb1dc61d1c3596602 100644
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel;
        struct pppol2tp_session *ps;
-       int old_headroom;
-       int new_headroom;
        int uhlen, headroom;
 
        if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (tunnel == NULL)
                goto abort_put_sess;
 
-       old_headroom = skb_headroom(skb);
        uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
        headroom = NET_SKB_PAD +
                   sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (skb_cow_head(skb, headroom))
                goto abort_put_sess_tun;
 
-       new_headroom = skb_headroom(skb);
-       skb->truesize += new_headroom - old_headroom;
-
        /* Setup PPP header */
        __skb_push(skb, sizeof(ppph));
        skb->data[0] = ppph[0];
index a9327e2e48ce8450212feef7085f91e446766faa..670cbc3518ded5930f900eaca7e0677835cf750b 100644
 /* Must be called with rcu_read_lock. */
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 {
-       if (unlikely(!vport)) {
-               kfree_skb(skb);
-               return;
-       }
+       if (unlikely(!vport))
+               goto error;
+
+       if (unlikely(skb_warn_if_lro(skb)))
+               goto error;
 
        /* Make our own copy of the packet.  Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
        skb_push(skb, ETH_HLEN);
        ovs_vport_receive(vport, skb);
+       return;
+
+error:
+       kfree_skb(skb);
 }
 
 /* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
                goto error;
        }
 
-       if (unlikely(skb_warn_if_lro(skb)))
-               goto error;
-
        skb->dev = netdev_vport->dev;
        len = skb->len;
        dev_queue_xmit(skb);
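The hunks in this file serve one change: the LRO check moves from the transmit path into netdev_port_receive(), so an LRO-aggregated skb (which cannot safely be forwarded) is warned about and dropped the moment it enters the vport, and all early receive failures now share a single kfree_skb() exit label. Restated as a standalone predicate; the helper name is illustrative, not from the patch:

	/* Illustrative restatement (not in-tree) of the ingress guard:
	 * skb_warn_if_lro() warns and returns true for LRO skbs, which
	 * must not be forwarded.
	 */
	static bool ingress_ok(struct vport *vport, struct sk_buff *skb)
	{
		if (unlikely(!vport))
			return false;
		return !skb_warn_if_lro(skb);
	}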
index e639645e8fec8ca365c8fc1bc4fb3650db04b5f4..c111bd0e083a5576d86dfc8f9ad91ab320c46efa 100644
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
 
        packet_flush_mclist(sk);
 
-       memset(&req_u, 0, sizeof(req_u));
-
-       if (po->rx_ring.pg_vec)
+       if (po->rx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 0);
+       }
 
-       if (po->tx_ring.pg_vec)
+       if (po->tx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 1);
+       }
 
        fanout_release(sk);
 
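req_u is the request union shared by the V1/V2 and V3 ring APIs, and packet_set_ring() is called here with a zeroed request (and closing == 1) to release each ring. Re-zeroing immediately before each call, rather than once up front, guarantees the tx teardown starts from a pristine request no matter what the rx call did with the shared storage. For reference, the union in question (from include/uapi/linux/if_packet.h of this era):

	/* Both teardown calls in packet_release() funnel through one
	 * stack-allocated instance of this union.
	 */
	union tpacket_req_u {
		struct tpacket_req	req;	/* TPACKET_V1 / TPACKET_V2 */
		struct tpacket_req3	req3;	/* TPACKET_V3 */
	};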
index 298c0ddfb57e3e5625a69d412c548d198005fd3b..3d2acc7a9c8099e677412276eeafe5a8b3f90fcb 100644
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                if (q->rate) {
                        struct sk_buff_head *list = &sch->q;
 
-                       delay += packet_len_2_sched_time(skb->len, q);
-
                        if (!skb_queue_empty(list)) {
                                /*
-                                * Last packet in queue is reference point (now).
-                                * First packet in queue is already in flight,
-                                * calculate this time bonus and substract
+                                * Last packet in queue is reference point (now),
+                                * calculate this time bonus and subtract
                                 * from delay.
                                 */
-                               delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+                               delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
+                               delay = max_t(psched_tdiff_t, 0, delay);
                                now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
                        }
+
+                       delay += packet_len_2_sched_time(skb->len, q);
                }
 
                cb->time_to_send = now + delay;
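The reordering matters on all three counts: the old code measured against the head of the queue (a packet already in flight) with the operands inverted, so a busy queue could inflate the delay instead of crediting queue wait, and without the clamp a negative delay could schedule the new packet to depart before the queue tail. A toy walk-through with made-up tick values (skb and q as in netem_enqueue(); this is a fragment, not a complete function):

	/* Toy values, purely illustrative; units are psched ticks. */
	psched_time_t  now      = 1000;	/* current time                     */
	psched_time_t  tail_tts = 1030;	/* time_to_send of last queued skb  */
	psched_tdiff_t delay    = 50;	/* delay drawn for the incoming skb */

	delay -= tail_tts - now;	/* 50 - 30 = 20: credit queue wait  */
	delay  = max_t(psched_tdiff_t, 0, delay); /* never before the tail  */
	now    = tail_tts;		/* reference point = tail departure */
	delay += packet_len_2_sched_time(skb->len, q); /* own tx time on top */
	/* resulting cb->time_to_send = now + delay */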
index 159b9bc5d63300e53560cf6495f8f65b9fd06449..d8420ae614dcaa2b12dfbe5cef78a39c7bfda8ca 100644
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
                return;
 
        if (atomic_dec_and_test(&key->refcnt)) {
-               kfree(key);
+               kzfree(key);
                SCTP_DBG_OBJCNT_DEC(keys);
        }
 }
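kzfree() behaves like kfree() except that it wipes the allocation first (the full ksize() of it), so a reference-dropped authentication key cannot be read back out of freed slab memory; the same substitution is applied to the setsockopt key copy further down. A simplified sketch of its behavior; see mm/util.c for the real implementation:

	/* Simplified sketch of kzfree() semantics (the in-tree version also
	 * handles ZERO_OR_NULL_PTR): wipe the usable size, then free.
	 */
	static void kzfree_sketch(void *p)
	{
		if (p)
			memset(p, 0, ksize(p));
		kfree(p);
	}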
index 17a001bac2cc3c81ab052c2b603b02091bd924e0..1a9c5fb77310a22f609d07918ed5629c0d7cc7d7 100644
@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 /* Final destructor for endpoint.  */
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
+       int i;
+
        SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
 
        /* Free up the HMAC transform. */
@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);
 
+       for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
+               memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
+
        /* Remove and free the port */
        if (sctp_sk(ep->base.sk)->bind_hash)
                sctp_put_port(ep->base.sk);
index 9e65758cb03814f9372eafbf86c4821af124be00..cedd9bf67b8c5942aa0cbf5441342be62abd5e69 100644
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
        ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
 out:
-       kfree(authkey);
+       kzfree(authkey);
        return ret;
 }
 
index 0a148c9d2a5ce7b3915df8c10f5e5f0913b99c25..0f679df7d072794094ece1aa48988e834395d849 100644
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
 }
 
 /*
- * See net/ipv6/datagram.c : datagram_recv_ctl
+ * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
  */
 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                                     struct cmsghdr *cmh)
index 01592d7d4789e389fd565e7cf9fb194dc7ee7bb0..45f1618c8e239c2db21692cfa5358f460918b76d 100644
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                                                  &iwe, IW_EV_UINT_LEN);
        }
 
-       buf = kmalloc(30, GFP_ATOMIC);
+       buf = kmalloc(31, GFP_ATOMIC);
        if (buf) {
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVCUSTOM;