Merge remote-tracking branch 'origin/master' into next
author Benjamin Herrenschmidt <benh@kernel.crashing.org>
Wed, 24 Apr 2013 04:43:36 +0000 (14:43 +1000)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
Wed, 24 Apr 2013 04:43:36 +0000 (14:43 +1000)
Merge upstream to get the audit fixes

192 files changed:
Documentation/kernel-parameters.txt
Makefile
arch/arm/include/asm/glue-cache.h
arch/arm/include/asm/hardware/iop3xx.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/tlbflush.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/sched_clock.c
arch/arm/kernel/setup.c
arch/arm/kernel/tcm.c
arch/arm/kernel/tcm.h [deleted file]
arch/arm/kvm/arm.c
arch/arm/kvm/coproc.c
arch/arm/mach-highbank/hotplug.c
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/cache-feroceon-l2.c
arch/arm/mm/cache-v3.S [deleted file]
arch/arm/mm/cache-v4.S
arch/arm/mm/mmu.c
arch/arm/mm/proc-arm740.S
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-mohawk.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-syms.c
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S
arch/arm/mm/tcm.h [new file with mode: 0644]
arch/avr32/include/asm/io.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/tm.S
arch/powerpc/kvm/e500.h
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/kvm/e500mc.c
arch/s390/include/asm/io.h
arch/s390/include/asm/pgtable.h
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/cputime.h [deleted file]
arch/sparc/include/asm/emergency-restart.h [deleted file]
arch/sparc/include/asm/mutex.h [deleted file]
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/serial.h [deleted file]
arch/sparc/include/asm/smp_32.h
arch/sparc/include/asm/switch_to_64.h
arch/sparc/include/asm/tlbflush_64.h
arch/sparc/include/uapi/asm/Kbuild
arch/sparc/include/uapi/asm/types.h [deleted file]
arch/sparc/kernel/smp_64.c
arch/sparc/lib/bitext.c
arch/sparc/mm/iommu.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
arch/sparc/mm/ultra.S
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/include/asm/efi.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/microcode_core_early.c
arch/x86/kernel/setup.c
arch/x86/platform/efi/efi.c
block/blk-core.c
drivers/block/rbd.c
drivers/char/hpet.c
drivers/dma/at_hdmac.c
drivers/firmware/Kconfig
drivers/firmware/efivars.c
drivers/idle/intel_idle.c
drivers/input/tablet/wacom_wac.c
drivers/irqchip/irq-gic.c
drivers/md/dm.c
drivers/md/raid5.c
drivers/mtd/mtdchar.c
drivers/net/bonding/bond_main.c
drivers/net/can/mcp251x.c
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/tun.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/sbus/char/bbc_i2c.c
drivers/ssb/driver_chipcommon_pmu.c
drivers/video/fbmem.c
drivers/video/mmp/core.c
fs/binfmt_elf.c
fs/bio.c
fs/hfsplus/extents.c
fs/hugetlbfs/inode.c
fs/proc/array.c
include/linux/blktrace_api.h
include/linux/efi.h
include/linux/kexec.h
include/linux/mm.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/sched.h
include/linux/ssb/ssb_driver_chipcommon.h
include/linux/swiotlb.h
include/linux/ucs2_string.h [new file with mode: 0644]
include/net/addrconf.h
include/net/irda/irlmp.h
include/net/scm.h
include/trace/events/block.h
include/trace/events/sched.h
include/uapi/linux/fuse.h
kernel/events/core.c
kernel/hrtimer.c
kernel/kexec.c
kernel/kprobes.c
kernel/kthread.c
kernel/signal.c
kernel/smpboot.c
kernel/trace/blktrace.c
kernel/user_namespace.c
lib/Kconfig
lib/Makefile
lib/swiotlb.c
lib/ucs2_string.c [new file with mode: 0644]
mm/hugetlb.c
mm/memory.c
mm/vmscan.c
net/802/mrp.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/routing.c
net/batman-adv/translation-table.c
net/batman-adv/vis.c
net/bridge/br_if.c
net/bridge/br_private.h
net/bridge/br_stp_if.c
net/core/dev.c
net/ipv4/esp4.c
net/ipv4/ip_fragment.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/reassembly.c
net/irda/iriap.c
net/irda/irlmp.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_nat_core.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/sched/cls_fw.c
scripts/checkpatch.pl
sound/core/pcm_native.c
tools/power/x86/turbostat/turbostat.c

index 4609e81..8ccbf27 100644 (file)
@@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        is selected automatically. Check
                        Documentation/kdump/kdump.txt for further details.
 
-       crashkernel_low=size[KMG]
-                       [KNL, x86] parts under 4G.
-
        crashkernel=range1:size1[,range2:size2,...][@offset]
                        [KNL] Same as above, but depends on the memory
                        in the running system. The syntax of range is
@@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        a memory unit (amount[KMG]). See also
                        Documentation/kdump/kdump.txt for an example.
 
+       crashkernel=size[KMG],high
+                       [KNL, x86_64] range may be above 4G. Allow the kernel
+                       to allocate the physical memory region from the top,
+                       so it may be above 4G if the system has more than 4G
+                       of RAM installed. Otherwise the memory region will be
+                       allocated below 4G, if available.
+                       It will be ignored if crashkernel=X is specified.
+       crashkernel=size[KMG],low
+                       [KNL, x86_64] range under 4G. When crashkernel=X,high
+                       is passed, the kernel may allocate the physical memory
+                       region above 4G, which causes the second kernel to
+                       crash on systems that require some amount of low
+                       memory, e.g. swiotlb requires at least 64M+32K of low
+                       memory. The kernel would try to allocate 72M below 4G
+                       automatically. This option lets the user specify their
+                       own low range under 4G for the second kernel instead.
+                       0: to disable low allocation.
+                       It will be ignored when crashkernel=X,high is not used
+                       or the memory reserved is below 4G.
+
        cs89x0_dma=     [HW,NET]
                        Format: <dma>
 
@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        edd=            [EDD]
                        Format: {"off" | "on" | "skip[mbr]"}
 
+       efi_no_storage_paranoia [EFI; X86]
+                       Using this parameter you can use more than 50% of
+                       your EFI variable storage. Use it only if you are
+                       sure that your UEFI does sane garbage collection and
+                       fulfills the spec, otherwise your board may brick.
+
        eisa_irq_edge=  [PARISC,HW]
                        See header of drivers/parisc/eisa.c.
 
index 9cf6783..46263d8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
 # Carefully list dependencies so we do not try to build scripts twice
 # in parallel
 PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
+        asm-generic
        $(Q)$(MAKE) $(build)=$(@)
 
 # Objects we will link into vmlinux / subdirs we need to visit
index cca9f15..ea289e1 100644 (file)
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
 #if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
index 02fe2fb..ed94b1a 100644 (file)
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE    0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE    0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE    0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE         0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
                                        IOP3XX_PERIPHERAL_SIZE - 1)
index 6ef8afd..86b8fe3 100644 (file)
 #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK   (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY                 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
-#define L_PTE_S2_RDWR           (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR           (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
index 9e9c041..ab865e6 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <asm/glue.h>
 
-#define TLB_V3_PAGE    (1 << 0)
 #define TLB_V4_U_PAGE  (1 << 1)
 #define TLB_V4_D_PAGE  (1 << 2)
 #define TLB_V4_I_PAGE  (1 << 3)
@@ -22,7 +21,6 @@
 #define TLB_V6_D_PAGE  (1 << 5)
 #define TLB_V6_I_PAGE  (1 << 6)
 
-#define TLB_V3_FULL    (1 << 8)
 #define TLB_V4_U_FULL  (1 << 9)
 #define TLB_V4_D_FULL  (1 << 10)
 #define TLB_V4_I_FULL  (1 << 11)
@@ -52,7 +50,6 @@
  *     =============
  *
  *     We have the following to choose from:
- *       v3    - ARMv3
  *       v4    - ARMv4 without write buffer
  *       v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *       v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+       if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
                if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-                       tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+       if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-               tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
                tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
                tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
index 5dc1aa6..1fd749e 100644 (file)
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
        .notifier_call = dbg_cpu_pm_notify,
 };
 
index 146157d..8c3094d 100644 (file)
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu *leader_pmu = event->group_leader->pmu;
 
-       if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+               return 1;
+
+       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;
 
        return armpmu->get_event_idx(hw_events, event) >= 0;
index bd6f56b..59d2adb 100644 (file)
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
 
 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
        return (cyc * mult) >> shift;
 }
 
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
        u64 epoch_ns;
        u32 epoch_cyc;
index d343a6c..234e339 100644 (file)
@@ -56,7 +56,6 @@
 #include <asm/virt.h>
 
 #include "atags.h"
-#include "tcm.h"
 
 
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -798,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_crashkernel();
 
-       tcm_init();
-
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
index 30ae6bb..f50f19e 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
-#include "tcm.h"
 
 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h
deleted file mode 100644 (file)
index 8015ad4..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * TCM memory handling for ARM systems
- *
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Rickard Andersson <rickard.andersson@stericsson.com>
- */
-
-#ifdef CONFIG_HAVE_TCM
-void __init tcm_init(void);
-#else
-/* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
-{
-}
-#endif
index 5a93698..c1fe498 100644 (file)
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
                r = 1;
+               break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
index 4ea9a98..7bed755 100644 (file)
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
        u32 val;
        int cpu;
 
-       cpu = get_cpu();
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
+       cpu = get_cpu();
+
        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
 
index f30c528..890cae2 100644 (file)
@@ -28,13 +28,11 @@ extern void secondary_startup(void);
  */
 void __ref highbank_cpu_die(unsigned int cpu)
 {
-       flush_cache_all();
-
        highbank_set_cpu_jump(cpu, phys_to_virt(0));
-       highbank_set_core_pwr();
 
-       cpu_do_idle();
+       flush_cache_louis();
+       highbank_set_core_pwr();
 
-       /* We should never return from idle */
-       panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
+       while (1)
+               cpu_do_idle();
 }
index 025d173..4045c49 100644 (file)
@@ -43,7 +43,7 @@ config CPU_ARM740T
        depends on !MMU
        select CPU_32v4T
        select CPU_ABRT_LV4T
-       select CPU_CACHE_V3     # although the core is v4t
+       select CPU_CACHE_V4
        select CPU_CP15_MPU
        select CPU_PABRT_LEGACY
        help
@@ -469,9 +469,6 @@ config CPU_PABRT_V7
        bool
 
 # The cache model
-config CPU_CACHE_V3
-       bool
-
 config CPU_CACHE_V4
        bool
 
index 4e333fa..9e51be9 100644 (file)
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY)        += pabort-legacy.o
 obj-$(CONFIG_CPU_PABRT_V6)     += pabort-v6.o
 obj-$(CONFIG_CPU_PABRT_V7)     += pabort-v7.o
 
-obj-$(CONFIG_CPU_CACHE_V3)     += cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)     += cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)   += cache-v4wt.o
 obj-$(CONFIG_CPU_CACHE_V4WB)   += cache-v4wb.o
index dd3d591..48bc3c0 100644 (file)
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
        outer_cache.inv_range = feroceon_l2_inv_range;
        outer_cache.clean_range = feroceon_l2_clean_range;
        outer_cache.flush_range = feroceon_l2_flush_range;
+       outer_cache.inv_all = l2_inv_all;
 
        enable_l2();
 
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644 (file)
index 8a3fade..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *  linux/arch/arm/mm/cache-v3.S
- *
- *  Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- *     flush_icache_all()
- *
- *     Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
-       mov     pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- *     flush_user_cache_all()
- *
- *     Invalidate all cache entries in a particular address
- *     space.
- *
- *     - mm    - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
-       /* FALLTHROUGH */
-/*
- *     flush_kern_cache_all()
- *
- *     Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
-       /* FALLTHROUGH */
-
-/*
- *     flush_user_cache_range(start, end, flags)
- *
- *     Invalidate a range of cache entries in the specified
- *     address space.
- *
- *     - start - start address (may not be aligned)
- *     - end   - end address (exclusive, may not be aligned)
- *     - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
-       mov     ip, #0
-       mcreq   p15, 0, ip, c7, c0, 0           @ flush ID cache
-       mov     pc, lr
-
-/*
- *     coherent_kern_range(start, end)
- *
- *     Ensure coherency between the Icache and the Dcache in the
- *     region described by start.  If you have non-snooping
- *     Harvard caches, you need to implement this function.
- *
- *     - start  - virtual start address
- *     - end    - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
-       /* FALLTHROUGH */
-
-/*
- *     coherent_user_range(start, end)
- *
- *     Ensure coherency between the Icache and the Dcache in the
- *     region described by start.  If you have non-snooping
- *     Harvard caches, you need to implement this function.
- *
- *     - start  - virtual start address
- *     - end    - virtual end address
- */
-ENTRY(v3_coherent_user_range)
-       mov     r0, #0
-       mov     pc, lr
-
-/*
- *     flush_kern_dcache_area(void *page, size_t size)
- *
- *     Ensure no D cache aliasing occurs, either with itself or
- *     the I cache
- *
- *     - addr  - kernel address
- *     - size  - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
-       /* FALLTHROUGH */
-
-/*
- *     dma_flush_range(start, end)
- *
- *     Clean and invalidate the specified virtual address range.
- *
- *     - start  - virtual start address
- *     - end    - virtual end address
- */
-ENTRY(v3_dma_flush_range)
-       mov     r0, #0
-       mcr     p15, 0, r0, c7, c0, 0           @ flush ID cache
-       mov     pc, lr
-
-/*
- *     dma_unmap_area(start, size, dir)
- *     - start - kernel virtual start address
- *     - size  - size of region
- *     - dir   - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
-       teq     r2, #DMA_TO_DEVICE
-       bne     v3_dma_flush_range
-       /* FALLTHROUGH */
-
-/*
- *     dma_map_area(start, size, dir)
- *     - start - kernel virtual start address
- *     - size  - size of region
- *     - dir   - DMA direction
- */
-ENTRY(v3_dma_map_area)
-       mov     pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
-       .globl  v3_flush_kern_cache_louis
-       .equ    v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v3
index 43e5d77..a7ba68f 100644 (file)
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
 ENTRY(v4_flush_user_cache_range)
 #ifdef CONFIG_CPU_CP15
        mov     ip, #0
-       mcreq   p15, 0, ip, c7, c7, 0           @ flush ID cache
+       mcr     p15, 0, ip, c7, c7, 0           @ flush ID cache
        mov     pc, lr
 #else
        /* FALLTHROUGH */
index 7897894..a84ff76 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/mach/pci.h>
 
 #include "mm.h"
+#include "tcm.h"
 
 /*
  * empty_zero_page is a special page that is used for
@@ -1277,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
        dma_contiguous_remap();
        devicemaps_init(mdesc);
        kmap_init();
+       tcm_init();
 
        top_pmd = pmd_off_k(0xffff0000);
 
index dc5de5d..fde2d2a 100644 (file)
@@ -77,24 +77,27 @@ __arm740_setup:
        mcr     p15, 0, r0, c6, c0              @ set area 0, default
 
        ldr     r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
-       ldr     r1, =(CONFIG_DRAM_SIZE >> 12)   @ size of RAM (must be >= 4KB)
-       mov     r2, #10                         @ 11 is the minimum (4KB)
-1:     add     r2, r2, #1                      @ area size *= 2
-       mov     r1, r1, lsr #1
+       ldr     r3, =(CONFIG_DRAM_SIZE >> 12)   @ size of RAM (must be >= 4KB)
+       mov     r4, #10                         @ 11 is the minimum (4KB)
+1:     add     r4, r4, #1                      @ area size *= 2
+       movs    r3, r3, lsr #1
        bne     1b                              @ count not zero r-shift
-       orr     r0, r0, r2, lsl #1              @ the area register value
+       orr     r0, r0, r4, lsl #1              @ the area register value
        orr     r0, r0, #1                      @ set enable bit
        mcr     p15, 0, r0, c6, c1              @ set area 1, RAM
 
        ldr     r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
-       ldr     r1, =(CONFIG_FLASH_SIZE >> 12)  @ size of FLASH (must be >= 4KB)
-       mov     r2, #10                         @ 11 is the minimum (4KB)
-1:     add     r2, r2, #1                      @ area size *= 2
-       mov     r1, r1, lsr #1
+       ldr     r3, =(CONFIG_FLASH_SIZE >> 12)  @ size of FLASH (must be >= 4KB)
+       cmp     r3, #0
+       moveq   r0, #0
+       beq     2f
+       mov     r4, #10                         @ 11 is the minimum (4KB)
+1:     add     r4, r4, #1                      @ area size *= 2
+       movs    r3, r3, lsr #1
        bne     1b                              @ count not zero r-shift
-       orr     r0, r0, r2, lsl #1              @ the area register value
+       orr     r0, r0, r4, lsl #1              @ the area register value
        orr     r0, r0, #1                      @ set enable bit
-       mcr     p15, 0, r0, c6, c2              @ set area 2, ROM/FLASH
+2:     mcr     p15, 0, r0, c6, c2              @ set area 2, ROM/FLASH
 
        mov     r0, #0x06
        mcr     p15, 0, r0, c2, c0              @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
        .long   0x41807400
        .long   0xfffffff0
        .long   0
+       .long   0
        b       __arm740_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
-       .long   HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+       .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
        .long   cpu_arm740_name
        .long   arm740_processor_functions
        .long   0
        .long   0
-       .long   v3_cache_fns                    @ cache model
+       .long   v4_cache_fns                    @ cache model
        .size   __arm740_proc_info, . - __arm740_proc_info
index 2c3b942..2556cf1 100644 (file)
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
 .equ   cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm920_do_suspend)
        stmfd   sp!, {r4 - r6, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index f1803f7..344c8a5 100644 (file)
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
 .equ   cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm926_do_suspend)
        stmfd   sp!, {r4 - r6, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index 82f9cdc..0b60dd3 100644 (file)
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 
 .globl cpu_mohawk_suspend_size
 .equ   cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_mohawk_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index 3aa0da1..d92dfd0 100644 (file)
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl cpu_sa1100_suspend_size
 .equ   cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_sa1100_do_suspend)
        stmfd   sp!, {r4 - r6, lr}
        mrc     p15, 0, r4, c3, c0, 0           @ domain ID
index 3e6210b..054b491 100644 (file)
@@ -17,7 +17,9 @@
 
 #ifndef MULTI_CPU
 EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
 #else
 EXPORT_SYMBOL(processor);
 #endif
index bcaaa8d..5c07ee4 100644 (file)
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl cpu_v6_suspend_size
 .equ   cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v6_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
index eb93d64..e8efd83 100644 (file)
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl cpu_xsc3_suspend_size
 .equ   cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xsc3_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index 2551036..e766f88 100644 (file)
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl cpu_xscale_suspend_size
 .equ   cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xscale_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
new file mode 100644 (file)
index 0000000..8015ad4
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
index cf60d0a..fc6483f 100644 (file)
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
 #define readw_be                       __raw_readw
 #define readl_be                       __raw_readl
 
+#define writeb_relaxed                 writeb
+#define writew_relaxed                 writew
+#define writel_relaxed                 writel
+
 #define writeb_be                      __raw_writeb
 #define writew_be                      __raw_writew
 #define writel_be                      __raw_writel
index 256c5bf..04d69c4 100644 (file)
@@ -304,7 +304,7 @@ syscall_exit_work:
        subi    r12,r12,TI_FLAGS
 
 4:     /* Anything else left to do? */
-       SET_DEFAULT_THREAD_PPR(r3, r9)          /* Set thread.ppr = 3 */
+       SET_DEFAULT_THREAD_PPR(r3, r10)         /* Set thread.ppr = 3 */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     .ret_from_except_lite
 
@@ -657,7 +657,7 @@ resume_kernel:
        /* Clear _TIF_EMULATE_STACK_STORE flag */
        lis     r11,_TIF_EMULATE_STACK_STORE@h
        addi    r5,r9,TI_FLAGS
-       ldarx   r4,0,r5
+0:     ldarx   r4,0,r5
        andc    r4,r4,r11
        stdcx.  r4,0,r5
        bne-    0b
index 834805c..c0dea6f 100644 (file)
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
                new->thread.regs->msr |=
                        (MSR_FP | new->thread.fpexc_mode);
        }
+#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&new->thread);
                new->thread.regs->msr |= MSR_VEC;
        }
+#endif
        /* We may as well turn on VSX too since all the state is restored now */
        if (msr & MSR_VSX)
                new->thread.regs->msr |= MSR_VSX;
index 3acb28e..95068bf 100644 (file)
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
                do_load_up_transact_fpu(&current->thread);
                regs->msr |= (MSR_FP | current->thread.fpexc_mode);
        }
+#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&current->thread);
                regs->msr |= MSR_VEC;
        }
+#endif
 
        return 0;
 }
index 995f854..c179428 100644 (file)
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                do_load_up_transact_fpu(&current->thread);
                regs->msr |= (MSR_FP | current->thread.fpexc_mode);
        }
+#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&current->thread);
                regs->msr |= MSR_VEC;
        }
+#endif
 
        return err;
 }
index 84dbace..2da67e7 100644 (file)
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
        or      r5, r6, r5                      /* Set MSR.FP+.VSX/.VEC */
        mtmsr   r5
 
+#ifdef CONFIG_ALTIVEC
        /* FP and VEC registers:  These are recheckpointed from thread.fpr[]
         * and thread.vr[] respectively.  The thread.transact_fpr[] version
         * is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
        REST_32VRS(0, r5, r3)                   /* r5 scratch, r3 THREAD ptr */
        ld      r5, THREAD_VRSAVE(r3)
        mtspr   SPRN_VRSAVE, r5
+#endif
 
 dont_restore_vec:
        andi.   r0, r4, MSR_FP
index 41cefd4..33db48a 100644 (file)
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID         (1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP                (1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0          (1 << 2)
 
 struct tlbe_ref {
-       pfn_t pfn;
-       unsigned int flags; /* E500_TLB_* */
+       pfn_t pfn;              /* valid only for TLB0, except briefly */
+       unsigned int flags;     /* E500_TLB_* */
 };
 
 struct tlbe_priv {
-       struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+       struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 
        unsigned int gtlb_nv[E500_TLB_NUM];
 
-       /*
-        * information associated with each host TLB entry --
-        * TLB1 only for now.  If/when guest TLB1 entries can be
-        * mapped with host TLB0, this will be used for that too.
-        *
-        * We don't want to use this for guest TLB0 because then we'd
-        * have the overhead of doing the translation again even if
-        * the entry is still in the guest TLB (e.g. we swapped out
-        * and back, and our host TLB entries got evicted).
-        */
-       struct tlbe_ref *tlb_refs[E500_TLB_NUM];
        unsigned int host_tlb1_nv;
 
        u32 svr;
index a222edf..1c6a9d7 100644 (file)
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
        /* Don't bother with unmapped entries */
-       if (!(ref->flags & E500_TLB_VALID))
-               return;
+       if (!(ref->flags & E500_TLB_VALID)) {
+               WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+                    "%s: flags %x\n", __func__, ref->flags);
+               WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+       }
 
        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         pfn_t pfn)
 {
        ref->pfn = pfn;
-       ref->flags = E500_TLB_VALID;
+       ref->flags |= E500_TLB_VALID;
 
        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
        if (ref->flags & E500_TLB_VALID) {
+               /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-       int tlbsel = 0;
-       int i;
-
-       for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-               struct tlbe_ref *ref =
-                       &vcpu_e500->gtlb_priv[tlbsel][i].ref;
-               kvmppc_e500_ref_release(ref);
-       }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       int stlbsel = 1;
+       int tlbsel;
        int i;
 
-       kvmppc_e500_tlbil_all(vcpu_e500);
-
-       for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-               struct tlbe_ref *ref =
-                       &vcpu_e500->tlb_refs[stlbsel][i];
-               kvmppc_e500_ref_release(ref);
+       for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+               for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+                       struct tlbe_ref *ref =
+                               &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+                       kvmppc_e500_ref_release(ref);
+               }
        }
-
-       clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-       clear_tlb_refs(vcpu_e500);
+       kvmppc_e500_tlbil_all(vcpu_e500);
+       clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
 }
 
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }
 
-       /* Drop old ref and setup new one. */
-       kvmppc_e500_ref_release(ref);
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;
 
-       vcpu_e500->tlb_refs[1][sesel] = *ref;
-       vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-       vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-               unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+               unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }
-       vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+       vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+       vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+       vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+       WARN_ON(!(ref->flags & E500_TLB_VALID));
 
        return sesel;
 }
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-       struct tlbe_ref ref;
+       struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;
 
-       ref.flags = 0;
        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-                                  &ref);
+                                  ref);
        if (r)
                return r;
 
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        }
 
        /* Otherwise map into TLB1 */
-       sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+       sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
        return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-               /* Triggers after clear_tlb_refs or on initial mapping */
+               /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;
 
-       vcpu_e500->tlb_refs[0] =
-               kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-                       GFP_KERNEL);
-       if (!vcpu_e500->tlb_refs[0])
-               goto err;
-
-       vcpu_e500->tlb_refs[1] =
-               kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-                       GFP_KERNEL);
-       if (!vcpu_e500->tlb_refs[1])
-               goto err;
-
        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
-               goto err;
+               return -EINVAL;
 
        return 0;
-
-err:
-       kfree(vcpu_e500->tlb_refs[0]);
-       kfree(vcpu_e500->tlb_refs[1]);
-       return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        kfree(vcpu_e500->h2g_tlb1_rmap);
-       kfree(vcpu_e500->tlb_refs[0]);
-       kfree(vcpu_e500->tlb_refs[1]);
 }
index 1f89d26..2f4baa0 100644 (file)
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
        mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
-       if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+       if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+           __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
                kvmppc_e500_tlbil_all(vcpu_e500);
+               __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+       }
 
        kvmppc_load_guest_fp(vcpu);
 }
index 27cb321..379d96e 100644 (file)
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 #define ioremap_nocache(addr, size)    ioremap(addr, size)
 #define ioremap_wc                     ioremap_nocache
 
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)               \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
        return (void __iomem *) offset;
index 4a54431..3cb47cf 100644 (file)
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
         (((unsigned long)(vaddr)) &zero_page_mask))))
 #define __HAVE_COLOR_ZERO_PAGE
 
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)               \
+       remap_pfn_range(vma, vaddr, pfn, size, prot)
+
 #endif /* !__ASSEMBLY__ */
 
 /*
index e26d430..ff18e3c 100644 (file)
@@ -2,11 +2,16 @@
 
 
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += div64.h
+generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += local64.h
+generic-y += mutex.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += module.h
+generic-y += serial.h
 generic-y += trace_clock.h
+generic-y += types.h
 generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h
deleted file mode 100644 (file)
index 1a642b8..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SPARC_CPUTIME_H */
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h
deleted file mode 100644 (file)
index 458c1f7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
index 08fcce9..7619f2f 100644 (file)
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
 
+#include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h
deleted file mode 100644 (file)
index f90d61c..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* __SPARC_SERIAL_H */
index b73da3c..3c8917f 100644 (file)
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                       unsigned long, unsigned long);
 
 void cpu_panic(void);
-extern void smp4m_irq_rotate(int cpu);
 
 /*
  *     General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
 void sun4d_init_smp(void);
 
 void smp_callin(void);
-void smp_boot_cpus(void);
 void smp_store_cpu_info(int);
 
 void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
 
 #define raw_smp_processor_id()         (current_thread_info()->cpu)
 
-#define prof_multiplier(__cpu)         cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)            cpu_data(__cpu).counter
-
 void smp_setup_cpu_possible_map(void);
 
 #endif /* !(__ASSEMBLY__) */
index cad36f5..c7de332 100644 (file)
@@ -18,8 +18,7 @@ do {                                          \
         * and 2 stores in this critical code path.  -DaveM
         */
 #define switch_to(prev, next, last)                                    \
-do {   flush_tlb_pending();                                            \
-       save_and_clear_fpu();                                           \
+do {   save_and_clear_fpu();                                           \
        /* If you are tempted to conditionalize the following */        \
        /* so that ASI is only written if it changes, think again. */   \
        __asm__ __volatile__("wr %%g0, %0, %%asi"                       \
index 2ef4634..f0d6a97 100644 (file)
 struct tlb_batch {
        struct mm_struct *mm;
        unsigned long tlb_nr;
+       unsigned long active;
        unsigned long vaddrs[TLB_BATCH_NR];
 };
 
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                 unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#define flush_tlb_range(vma,start,end) \
-       do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr)       flush_tlb_pending()
-#define flush_tlb_mm(mm)               flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 /* Local cpu only.  */
 extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do {        flush_tsb_kernel_range(start,end); \
        __flush_tlb_kernel_range(start,end); \
 } while (0)
 
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
 #else /* CONFIG_SMP */
 
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {   flush_tsb_kernel_range(start,end); \
        smp_flush_tlb_kernel_range(start, end); \
 } while (0)
 
+#define global_flush_tlb_page(mm, vaddr) \
+       smp_flush_tlb_page(mm, vaddr)
+
 #endif /* ! CONFIG_SMP */
 
 #endif /* _SPARC64_TLBFLUSH_H */
index ce175af..b5843ee 100644 (file)
@@ -44,7 +44,6 @@ header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
 header-y += traps.h
-header-y += types.h
 header-y += uctx.h
 header-y += unistd.h
 header-y += utrap.h
diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h
deleted file mode 100644 (file)
index 383d156..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _SPARC_TYPES_H
-#define _SPARC_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue.  However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#if defined(__sparc__)
-
-#include <asm-generic/int-ll64.h>
-
-#endif /* defined(__sparc__) */
-
-#endif /* defined(_SPARC_TYPES_H) */
index 537eb66..ca64d2a 100644 (file)
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 }
 
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
 extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ local_flush_and_out:
        put_cpu();
 }
 
+struct tlb_pending_info {
+       unsigned long ctx;
+       unsigned long nr;
+       unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+       struct tlb_pending_info *t = info;
+
+       __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
        u32 ctx = CTX_HWBITS(mm->context);
+       struct tlb_pending_info info;
        int cpu = get_cpu();
 
+       info.ctx = ctx;
+       info.nr = nr;
+       info.vaddrs = vaddrs;
+
        if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
                cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
        else
-               smp_cross_call_masked(&xcall_flush_tlb_pending,
-                                     ctx, nr, (unsigned long) vaddrs,
-                                     mm_cpumask(mm));
+               smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+                                      &info, 1);
 
        __flush_tlb_pending(ctx, nr, vaddrs);
 
        put_cpu();
 }
 
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       unsigned long context = CTX_HWBITS(mm->context);
+       int cpu = get_cpu();
+
+       if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+       else
+               smp_cross_call_masked(&xcall_flush_tlb_page,
+                                     context, vaddr, 0,
+                                     mm_cpumask(mm));
+       __flush_tlb_page(context, vaddr);
+
+       put_cpu();
+}
+
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        start &= PAGE_MASK;
index 48d00e7..8ec4e9c 100644 (file)
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
 
 void bit_map_init(struct bit_map *t, unsigned long *map, int size)
 {
-
-       if ((size & 07) != 0)
-               BUG();
-       memset(map, 0, size>>3);
-
+       bitmap_zero(map, size);
        memset(t, 0, sizeof *t);
        spin_lock_init(&t->lock);
        t->map = map;
index 0f4f719..28f96f2 100644 (file)
@@ -34,7 +34,7 @@
 #define IOMMU_RNGE     IOMMU_RNGE_256MB
 #define IOMMU_START    0xF0000000
 #define IOMMU_WINSIZE  (256*1024*1024U)
-#define IOMMU_NPTES    (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 265KB */
+#define IOMMU_NPTES    (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB */
 #define IOMMU_ORDER    6                               /* 4096 * (1<<6) */
 
 /* srmmu.c */
index c38bb72..036c279 100644 (file)
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
                SRMMU_NOCACHE_ALIGN_MAX, 0UL);
        memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
-       srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+       srmmu_nocache_bitmap =
+               __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+                               SMP_CACHE_BYTES, 0UL);
        bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
        srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
index ba6ae7f..272aa4f 100644 (file)
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 void flush_tlb_pending(void)
 {
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+       struct mm_struct *mm = tb->mm;
 
-       if (tb->tlb_nr) {
-               flush_tsb_user(tb);
+       if (!tb->tlb_nr)
+               goto out;
 
-               if (CTX_VALID(tb->mm->context)) {
+       flush_tsb_user(tb);
+
+       if (CTX_VALID(mm->context)) {
+               if (tb->tlb_nr == 1) {
+                       global_flush_tlb_page(mm, tb->vaddrs[0]);
+               } else {
 #ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
                                            tb->tlb_nr, &tb->vaddrs[0]);
 #endif
                }
-               tb->tlb_nr = 0;
        }
 
+       tb->tlb_nr = 0;
+
+out:
        put_cpu_var(tlb_batch);
 }
 
+void arch_enter_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+       tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+       if (tb->tlb_nr)
+               flush_tlb_pending();
+       tb->active = 0;
+}
+
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
 {
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                nr = 0;
        }
 
+       if (!tb->active) {
+               global_flush_tlb_page(mm, vaddr);
+               flush_tsb_user_page(mm, vaddr);
+               return;
+       }
+
        if (nr == 0)
                tb->mm = mm;
 
index 428982b..2cc3bce 100644 (file)
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
        }
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-                           unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+                                 unsigned long hash_shift,
+                                 unsigned long nentries)
 {
-       unsigned long i;
+       unsigned long tag, ent, hash;
 
-       for (i = 0; i < tb->tlb_nr; i++) {
-               unsigned long v = tb->vaddrs[i];
-               unsigned long tag, ent, hash;
+       v &= ~0x1UL;
+       hash = tsb_hash(v, hash_shift, nentries);
+       ent = tsb + (hash * sizeof(struct tsb));
+       tag = (v >> 22UL);
 
-               v &= ~0x1UL;
+       tsb_flush(ent, tag);
+}
 
-               hash = tsb_hash(v, hash_shift, nentries);
-               ent = tsb + (hash * sizeof(struct tsb));
-               tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+                           unsigned long tsb, unsigned long nentries)
+{
+       unsigned long i;
 
-               tsb_flush(ent, tag);
-       }
+       for (i = 0; i < tb->tlb_nr; i++)
+               __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
        spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       unsigned long nentries, base, flags;
+
+       spin_lock_irqsave(&mm->context.lock, flags);
+
+       base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+       nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+       if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+               base = __pa(base);
+       __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+               base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+               nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+               if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+                       base = __pa(base);
+               __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+       }
+#endif
+       spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE       HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE      HV_PGSZ_MASK_8K
 
index f8e13d4..432aa0c 100644 (file)
@@ -52,6 +52,33 @@ __flush_tlb_mm:              /* 18 insns */
        nop
        nop
 
+       .align          32
+       .globl          __flush_tlb_page
+__flush_tlb_page:      /* 22 insns */
+       /* %o0 = context, %o1 = vaddr */
+       rdpr            %pstate, %g7
+       andn            %g7, PSTATE_IE, %g2
+       wrpr            %g2, %pstate
+       mov             SECONDARY_CONTEXT, %o4
+       ldxa            [%o4] ASI_DMMU, %g2
+       stxa            %o0, [%o4] ASI_DMMU
+       andcc           %o1, 1, %g0
+       andn            %o1, 1, %o3
+       be,pn           %icc, 1f
+        or             %o3, 0x10, %o3
+       stxa            %g0, [%o3] ASI_IMMU_DEMAP
+1:     stxa            %g0, [%o3] ASI_DMMU_DEMAP
+       membar          #Sync
+       stxa            %g2, [%o4] ASI_DMMU
+       sethi           %hi(KERNBASE), %o4
+       flush           %o4
+       retl
+        wrpr           %g7, 0x0, %pstate
+       nop
+       nop
+       nop
+       nop
+
        .align          32
        .globl          __flush_tlb_pending
 __flush_tlb_pending:   /* 26 insns */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
        retl
         wrpr           %g7, 0x0, %pstate
 
+__cheetah_flush_tlb_page:      /* 22 insns */
+       /* %o0 = context, %o1 = vaddr */
+       rdpr            %pstate, %g7
+       andn            %g7, PSTATE_IE, %g2
+       wrpr            %g2, 0x0, %pstate
+       wrpr            %g0, 1, %tl
+       mov             PRIMARY_CONTEXT, %o4
+       ldxa            [%o4] ASI_DMMU, %g2
+       srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+       sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+       or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
+       stxa            %o0, [%o4] ASI_DMMU
+       andcc           %o1, 1, %g0
+       be,pn           %icc, 1f
+        andn           %o1, 1, %o3
+       stxa            %g0, [%o3] ASI_IMMU_DEMAP
+1:     stxa            %g0, [%o3] ASI_DMMU_DEMAP       
+       membar          #Sync
+       stxa            %g2, [%o4] ASI_DMMU
+       sethi           %hi(KERNBASE), %o4
+       flush           %o4
+       wrpr            %g0, 0, %tl
+       retl
+        wrpr           %g7, 0x0, %pstate
+
 __cheetah_flush_tlb_pending:   /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr            %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
        retl
         nop
 
+__hypervisor_flush_tlb_page: /* 11 insns */
+       /* %o0 = context, %o1 = vaddr */
+       mov             %o0, %g2
+       mov             %o1, %o0              /* ARG0: vaddr + IMMU-bit */
+       mov             %g2, %o1              /* ARG1: mmu context */
+       mov             HV_MMU_ALL, %o2       /* ARG2: flags */
+       srlx            %o0, PAGE_SHIFT, %o0
+       sllx            %o0, PAGE_SHIFT, %o0
+       ta              HV_MMU_UNMAP_ADDR_TRAP
+       brnz,pn         %o0, __hypervisor_tlb_tl0_error
+        mov            HV_MMU_UNMAP_ADDR_TRAP, %o1
+       retl
+        nop
+
 __hypervisor_flush_tlb_pending: /* 16 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        sllx            %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
        call            tlb_patch_one
         mov            19, %o2
 
+       sethi           %hi(__flush_tlb_page), %o0
+       or              %o0, %lo(__flush_tlb_page), %o0
+       sethi           %hi(__cheetah_flush_tlb_page), %o1
+       or              %o1, %lo(__cheetah_flush_tlb_page), %o1
+       call            tlb_patch_one
+        mov            22, %o2
+
        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm:        /* 21 insns */
        nop
        nop
 
-       .globl          xcall_flush_tlb_pending
-xcall_flush_tlb_pending:       /* 21 insns */
-       /* %g5=context, %g1=nr, %g7=vaddrs[] */
-       sllx            %g1, 3, %g1
+       .globl          xcall_flush_tlb_page
+xcall_flush_tlb_page:  /* 17 insns */
+       /* %g5=context, %g1=vaddr */
        mov             PRIMARY_CONTEXT, %g4
        ldxa            [%g4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending:  /* 21 insns */
        or              %g5, %g4, %g5
        mov             PRIMARY_CONTEXT, %g4
        stxa            %g5, [%g4] ASI_DMMU
-1:     sub             %g1, (1 << 3), %g1
-       ldx             [%g7 + %g1], %g5
-       andcc           %g5, 0x1, %g0
+       andcc           %g1, 0x1, %g0
        be,pn           %icc, 2f
-
-        andn           %g5, 0x1, %g5
+        andn           %g1, 0x1, %g5
        stxa            %g0, [%g5] ASI_IMMU_DEMAP
 2:     stxa            %g0, [%g5] ASI_DMMU_DEMAP
        membar          #Sync
-       brnz,pt         %g1, 1b
-        nop
        stxa            %g2, [%g4] ASI_DMMU
        retry
        nop
+       nop
 
        .globl          xcall_flush_tlb_kernel_range
 xcall_flush_tlb_kernel_range:  /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
        membar          #Sync
        retry
 
-       .globl          __hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
-       /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
-       sllx            %g1, 3, %g1
+       .globl          __hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+       /* %g5=ctx, %g1=vaddr */
        mov             %o0, %g2
        mov             %o1, %g3
        mov             %o2, %g4
-1:     sub             %g1, (1 << 3), %g1
-       ldx             [%g7 + %g1], %o0        /* ARG0: virtual address */
+       mov             %g1, %o0                /* ARG0: virtual address */
        mov             %g5, %o1                /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx            %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,a,pn       %o0, __hypervisor_tlb_xcall_error
         mov            %o0, %g5
-       brnz,pt         %g1, 1b
-        nop
        mov             %g2, %o0
        mov             %g3, %o1
        mov             %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
        call            tlb_patch_one
         mov            10, %o2
 
+       sethi           %hi(__flush_tlb_page), %o0
+       or              %o0, %lo(__flush_tlb_page), %o0
+       sethi           %hi(__hypervisor_flush_tlb_page), %o1
+       or              %o1, %lo(__hypervisor_flush_tlb_page), %o1
+       call            tlb_patch_one
+        mov            11, %o2
+
        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
        call            tlb_patch_one
         mov            21, %o2
 
-       sethi           %hi(xcall_flush_tlb_pending), %o0
-       or              %o0, %lo(xcall_flush_tlb_pending), %o0
-       sethi           %hi(__hypervisor_xcall_flush_tlb_pending), %o1
-       or              %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+       sethi           %hi(xcall_flush_tlb_page), %o0
+       or              %o0, %lo(xcall_flush_tlb_page), %o0
+       sethi           %hi(__hypervisor_xcall_flush_tlb_page), %o1
+       or              %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
        call            tlb_patch_one
-        mov            21, %o2
+        mov            17, %o2
 
        sethi           %hi(xcall_flush_tlb_kernel_range), %o0
        or              %o0, %lo(xcall_flush_tlb_kernel_range), %o0
index 70c0f3d..15b5cef 100644 (file)
@@ -1549,6 +1549,7 @@ config X86_SMAP
 config EFI
        bool "EFI runtime service support"
        depends on ACPI
+       select UCS2_STRING
        ---help---
          This enables the kernel to use EFI runtime services that are
          available (such as the EFI variable services).
index c205035..8615f75 100644 (file)
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
        *size = len;
 }
 
+static efi_status_t setup_efi_vars(struct boot_params *params)
+{
+       struct setup_data *data;
+       struct efi_var_bootdata *efidata;
+       u64 store_size, remaining_size, var_size;
+       efi_status_t status;
+
+       if (!sys_table->runtime->query_variable_info)
+               return EFI_UNSUPPORTED;
+
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       status = efi_call_phys4(sys_table->runtime->query_variable_info,
+                               EFI_VARIABLE_NON_VOLATILE |
+                               EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                               EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
+                               &remaining_size, &var_size);
+
+       if (status != EFI_SUCCESS)
+               return status;
+
+       status = efi_call_phys3(sys_table->boottime->allocate_pool,
+                               EFI_LOADER_DATA, sizeof(*efidata), &efidata);
+
+       if (status != EFI_SUCCESS)
+               return status;
+
+       efidata->data.type = SETUP_EFI_VARS;
+       efidata->data.len = sizeof(struct efi_var_bootdata) -
+               sizeof(struct setup_data);
+       efidata->data.next = 0;
+       efidata->store_size = store_size;
+       efidata->remaining_size = remaining_size;
+       efidata->max_var_size = var_size;
+
+       if (data)
+               data->next = (unsigned long)efidata;
+       else
+               params->hdr.setup_data = (unsigned long)efidata;
+
+       return EFI_SUCCESS;
+}
+
 static efi_status_t setup_efi_pci(struct boot_params *params)
 {
        efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
 
        setup_graphics(boot_params);
 
+       setup_efi_vars(boot_params);
+
        setup_efi_pci(boot_params);
 
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
index 60c89f3..2fb5d58 100644 (file)
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
+struct efi_var_bootdata {
+       struct setup_data data;
+       u64 store_size;
+       u64 remaining_size;
+       u64 max_var_size;
+};
+
 #ifdef CONFIG_EFI
 
 static inline bool efi_is_native(void)
index c15ddaf..0874424 100644 (file)
@@ -6,6 +6,7 @@
 #define SETUP_E820_EXT                 1
 #define SETUP_DTB                      2
 #define SETUP_PCI                      3
+#define SETUP_EFI_VARS                 4
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
index a7d26d8..8f4be53 100644 (file)
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return false;
 
-       /*
-        * Xen emulates Hyper-V to support enlightened Windows.
-        * Check to see first if we are on a Xen Hypervisor.
-        */
-       if (xen_cpuid_base())
-               return false;
-
        cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
              &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
 
        if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
                clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-#if IS_ENABLED(CONFIG_HYPERV)
-       /*
-        * Setup the IDT for hypervisor callback.
-        */
-       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-#endif
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
 
 void hv_register_vmbus_handler(int irq, irq_handler_t handler)
 {
+       /*
+        * Setup the IDT for hypervisor callback.
+        */
+       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
        vmbus_irq = irq;
        vmbus_isr = handler;
 }
index dab7580..cc45deb 100644 (file)
@@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 };
 
 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
-       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
-       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+       EVENT_EXTRA_END
+};
+
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        EVENT_EXTRA_END
 };
 
@@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               x86_pmu.extra_regs = intel_snb_extra_regs;
+               if (boot_cpu_data.x86_model == 45)
+                       x86_pmu.extra_regs = intel_snbep_extra_regs;
+               else
+                       x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_ivb_event_constraints;
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               x86_pmu.extra_regs = intel_snb_extra_regs;
+               if (boot_cpu_data.x86_model == 62)
+                       x86_pmu.extra_regs = intel_snbep_extra_regs;
+               else
+                       x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
index 577db84..833d51d 100644 (file)
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
        u32 eax = 0x00000000;
        u32 ebx, ecx = 0, edx;
 
-       if (!have_cpuid_p())
-               return X86_VENDOR_UNKNOWN;
-
        native_cpuid(&eax, &ebx, &ecx, &edx);
 
        if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
        return X86_VENDOR_UNKNOWN;
 }
 
+static int __cpuinit x86_family(void)
+{
+       u32 eax = 0x00000001;
+       u32 ebx, ecx = 0, edx;
+       int x86;
+
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       x86 = (eax >> 8) & 0xf;
+       if (x86 == 15)
+               x86 += (eax >> 20) & 0xff;
+
+       return x86;
+}
+
 void __init load_ucode_bsp(void)
 {
-       int vendor = x86_vendor();
+       int vendor, x86;
+
+       if (!have_cpuid_p())
+               return;
 
-       if (vendor == X86_VENDOR_INTEL)
+       vendor = x86_vendor();
+       x86 = x86_family();
+
+       if (vendor == X86_VENDOR_INTEL && x86 >= 6)
                load_ucode_intel_bsp();
 }
 
 void __cpuinit load_ucode_ap(void)
 {
-       int vendor = x86_vendor();
+       int vendor, x86;
+
+       if (!have_cpuid_p())
+               return;
+
+       vendor = x86_vendor();
+       x86 = x86_family();
 
-       if (vendor == X86_VENDOR_INTEL)
+       if (vendor == X86_VENDOR_INTEL && x86 >= 6)
                load_ucode_intel_ap();
 }
index 90d8cc9..fae9134 100644 (file)
@@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 /*
  * Keep the crash kernel below this limit.  On 32 bits earlier kernels
  * would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64-bit, old kexec-tools need it to be under 896 MiB.
  */
 #ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_LOW_MAX     (512 << 20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX    (512 << 20)
 #else
-# define CRASH_KERNEL_ADDR_MAX MAXMEM
+# define CRASH_KERNEL_ADDR_LOW_MAX     (896UL<<20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX    MAXMEM
 #endif
 
 static void __init reserve_crashkernel_low(void)
@@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
        unsigned long long low_base = 0, low_size = 0;
        unsigned long total_low_mem;
        unsigned long long base;
+       bool auto_set = false;
        int ret;
 
        total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
+       /* crashkernel=Y,low */
        ret = parse_crashkernel_low(boot_command_line, total_low_mem,
                                                &low_size, &base);
-       if (ret != 0 || low_size <= 0)
-               return;
+       if (ret != 0) {
+               /*
+                * two parts from lib/swiotlb.c:
+                *      swiotlb size: user specified with swiotlb= or default.
+                *      swiotlb overflow buffer: currently hardcoded to 32k.
+                *              We round it to 8M for other buffers that
+                *              may need to stay low too.
+                */
+               low_size = swiotlb_size_or_default() + (8UL<<20);
+               auto_set = true;
+       } else {
+               /* passed with crashkernel=0,low ? */
+               if (!low_size)
+                       return;
+       }
 
        low_base = memblock_find_in_range(low_size, (1ULL<<32),
                                        low_size, alignment);
 
        if (!low_base) {
-               pr_info("crashkernel low reservation failed - No suitable area found.\n");
+               if (!auto_set)
+                       pr_info("crashkernel low reservation failed - No suitable area found.\n");
 
                return;
        }
@@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)
        const unsigned long long alignment = 16<<20;    /* 16M */
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
+       bool high = false;
        int ret;
 
        total_mem = memblock_phys_mem_size();
 
+       /* crashkernel=XM */
        ret = parse_crashkernel(boot_command_line, total_mem,
                        &crash_size, &crash_base);
-       if (ret != 0 || crash_size <= 0)
-               return;
+       if (ret != 0 || crash_size <= 0) {
+               /* crashkernel=X,high */
+               ret = parse_crashkernel_high(boot_command_line, total_mem,
+                               &crash_size, &crash_base);
+               if (ret != 0 || crash_size <= 0)
+                       return;
+               high = true;
+       }
 
        /* 0 means: find the address automatically */
        if (crash_base <= 0) {
@@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)
                 *  kexec wants the bzImage to be below CRASH_KERNEL_ADDR_MAX
                 */
                crash_base = memblock_find_in_range(alignment,
-                              CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
+                                       high ? CRASH_KERNEL_ADDR_HIGH_MAX :
+                                              CRASH_KERNEL_ADDR_LOW_MAX,
+                                       crash_size, alignment);
 
                if (!crash_base) {
                        pr_info("crashkernel reservation failed - No suitable area found.\n");
index 5f2ecaf..e4a86a6 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/bcd.h>
+#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/efi.h>
 
 #define EFI_DEBUG      1
 
+/*
+ * There's some additional metadata associated with each
+ * variable. Intel's reference implementation is 60 bytes - bump that
+ * to account for potential alignment constraints
+ */
+#define VAR_METADATA_SIZE 64
+
 struct efi __read_mostly efi = {
        .mps        = EFI_INVALID_TABLE_ADDR,
        .acpi       = EFI_INVALID_TABLE_ADDR,
@@ -69,6 +77,13 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
+static u64 efi_var_store_size;
+static u64 efi_var_remaining_size;
+static u64 efi_var_max_var_size;
+static u64 boot_used_size;
+static u64 boot_var_size;
+static u64 active_size;
+
 unsigned long x86_efi_facility;
 
 /*
@@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)
 }
 early_param("add_efi_memmap", setup_add_efi_memmap);
 
+static bool efi_no_storage_paranoia;
+
+static int __init setup_storage_paranoia(char *arg)
+{
+       efi_no_storage_paranoia = true;
+       return 0;
+}
+early_param("efi_no_storage_paranoia", setup_storage_paranoia);
+
 
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
@@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
                                               efi_char16_t *name,
                                               efi_guid_t *vendor)
 {
-       return efi_call_virt3(get_next_variable,
-                             name_size, name, vendor);
+       efi_status_t status;
+       static bool finished = false;
+       static u64 var_size;
+
+       status = efi_call_virt3(get_next_variable,
+                               name_size, name, vendor);
+
+       if (status == EFI_NOT_FOUND) {
+               finished = true;
+               if (var_size < boot_used_size) {
+                       boot_var_size = boot_used_size - var_size;
+                       active_size += boot_var_size;
+               } else {
+                       printk(KERN_WARNING FW_BUG  "efi: Inconsistent initial sizes\n");
+               }
+       }
+
+       if (boot_used_size && !finished) {
+               unsigned long size;
+               u32 attr;
+               efi_status_t s;
+               void *tmp;
+
+               s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
+
+               if (s != EFI_BUFFER_TOO_SMALL || !size)
+                       return status;
+
+               tmp = kmalloc(size, GFP_ATOMIC);
+
+               if (!tmp)
+                       return status;
+
+               s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
+
+               if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
+                       var_size += size;
+                       var_size += ucs2_strsize(name, 1024);
+                       active_size += size;
+                       active_size += VAR_METADATA_SIZE;
+                       active_size += ucs2_strsize(name, 1024);
+               }
+
+               kfree(tmp);
+       }
+
+       return status;
 }
 
 static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
                                          unsigned long data_size,
                                          void *data)
 {
-       return efi_call_virt5(set_variable,
-                             name, vendor, attr,
-                             data_size, data);
+       efi_status_t status;
+       u32 orig_attr = 0;
+       unsigned long orig_size = 0;
+
+       status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
+                                      NULL);
+
+       if (status != EFI_BUFFER_TOO_SMALL)
+               orig_size = 0;
+
+       status = efi_call_virt5(set_variable,
+                               name, vendor, attr,
+                               data_size, data);
+
+       if (status == EFI_SUCCESS) {
+               if (orig_size) {
+                       active_size -= orig_size;
+                       active_size -= ucs2_strsize(name, 1024);
+                       active_size -= VAR_METADATA_SIZE;
+               }
+               if (data_size) {
+                       active_size += data_size;
+                       active_size += ucs2_strsize(name, 1024);
+                       active_size += VAR_METADATA_SIZE;
+               }
+       }
+
+       return status;
 }
 
 static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -682,6 +776,9 @@ void __init efi_init(void)
        char vendor[100] = "unknown";
        int i = 0;
        void *tmp;
+       struct setup_data *data;
+       struct efi_var_bootdata *efi_var_data;
+       u64 pa_data;
 
 #ifdef CONFIG_X86_32
        if (boot_params.efi_info.efi_systab_hi ||
@@ -699,6 +796,22 @@ void __init efi_init(void)
        if (efi_systab_init(efi_phys.systab))
                return;
 
+       pa_data = boot_params.hdr.setup_data;
+       while (pa_data) {
+               data = early_ioremap(pa_data, sizeof(*efi_var_data));
+               if (data->type == SETUP_EFI_VARS) {
+                       efi_var_data = (struct efi_var_bootdata *)data;
+
+                       efi_var_store_size = efi_var_data->store_size;
+                       efi_var_remaining_size = efi_var_data->remaining_size;
+                       efi_var_max_var_size = efi_var_data->max_var_size;
+               }
+               pa_data = data->next;
+               early_iounmap(data, sizeof(*efi_var_data));
+       }
+
+       boot_used_size = efi_var_store_size - efi_var_remaining_size;
+
        set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
        /*
@@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
        }
        return 0;
 }
+
+/*
+ * Some firmware has serious problems when using more than 50% of the EFI
+ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
+ * we never use more than this safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+       efi_status_t status;
+       u64 storage_size, remaining_size, max_size;
+
+       status = efi.query_variable_info(attributes, &storage_size,
+                                        &remaining_size, &max_size);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       if (!max_size && remaining_size > size)
+               printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+                           " is returning MaxVariableSize=0\n");
+       /*
+        * Some firmware implementations refuse to boot if there's insufficient
+        * space in the variable store. We account for that by refusing the
+        * write if permitting it would reduce the available space to under
+        * 50%. However, some firmware won't reclaim variable space until
+        * after the used (not merely the actively used) space drops below
+        * a threshold. We can approximate that case with the value calculated
+        * above. If both the firmware and our calculations indicate that the
+        * available space would drop below 50%, refuse the write.
+        */
+
+       if (!storage_size || size > remaining_size ||
+           (max_size && size > max_size))
+               return EFI_OUT_OF_RESOURCES;
+
+       if (!efi_no_storage_paranoia &&
+           ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
+            (remaining_size - size < storage_size / 2)))
+               return EFI_OUT_OF_RESOURCES;
+
+       return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
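
Note: as a concrete (made-up) walk-through of the paranoia check above: with storage_size = 512 KiB the threshold is 256 KiB. For an 8 KiB write with active_size = 250 KiB and remaining_size = 270 KiB, the kernel-side estimate crosses the threshold (250 + 8 + ~0.06 KiB of metadata > 256 KiB), but the firmware still reports 270 - 8 = 262 KiB free, so only one of the two conditions holds and the write is allowed. If the firmware instead reported remaining_size = 260 KiB, then 260 - 8 = 252 KiB < 256 KiB as well, both conditions hold, and efi_query_variable_store() returns EFI_OUT_OF_RESOURCES, unless efi_no_storage_paranoia was passed on the command line.
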
index 074b758..7c28835 100644 (file)
@@ -39,6 +39,7 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
index f556f8a..b7b7a88 100644 (file)
@@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
        struct rbd_device *rbd_dev = img_request->rbd_dev;
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
+       struct rbd_obj_request *next_obj_request;
 
        dout("%s: img %p\n", __func__, img_request);
-       for_each_obj_request(img_request, obj_request) {
+       for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
                int ret;
 
                obj_request->callback = rbd_img_obj_callback;
index e3f9a99..d784650 100644 (file)
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
        struct hpet_dev *devp;
        unsigned long addr;
 
-       if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
-               return -EINVAL;
-
        devp = file->private_data;
        addr = devp->hd_hpets->hp_hpet_phys;
 
        if (addr & (PAGE_SIZE - 1))
                return -ENOSYS;
 
-       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
-                                       PAGE_SIZE, vma->vm_page_prot)) {
-               printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
-                       __func__);
-               return -EAGAIN;
-       }
-
-       return 0;
+       return vm_iomap_memory(vma, addr, PAGE_SIZE);
 #else
        return -ENOSYS;
 #endif
index 6e13f26..88cfc61 100644 (file)
@@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 
        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
-       BUG_ON(atc_chan_is_enabled(atchan));
-
        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 {
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
+       if (atc_chan_is_enabled(atchan))
+               return;
+
        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
@@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan)
                return;
 
        spin_lock_irqsave(&atchan->lock, flags);
-       if (!atc_chan_is_enabled(atchan)) {
-               atc_advance_work(atchan);
-       }
+       atc_advance_work(atchan);
        spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
index 42c759a..3e53200 100644 (file)
@@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP
 config EFI_VARS
        tristate "EFI Variable Support via sysfs"
        depends on EFI
+       select UCS2_STRING
        default n
        help
          If you say Y here, you are able to get EFI (Extensible Firmware
index 7acafb8..182ce94 100644 (file)
@@ -80,6 +80,7 @@
 #include <linux/slab.h>
 #include <linux/pstore.h>
 #include <linux/ctype.h>
+#include <linux/ucs2_string.h>
 
 #include <linux/fs.h>
 #include <linux/ramfs.h>
@@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *);
 static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
 static bool efivar_wq_enabled = true;
 
-/* Return the number of unicode characters in data */
-static unsigned long
-utf16_strnlen(efi_char16_t *s, size_t maxlength)
-{
-       unsigned long length = 0;
-
-       while (*s++ != 0 && length < maxlength)
-               length++;
-       return length;
-}
-
-static inline unsigned long
-utf16_strlen(efi_char16_t *s)
-{
-       return utf16_strnlen(s, ~0UL);
-}
-
-/*
- * Return the number of bytes is the length of this string
- * Note: this is NOT the same as the number of unicode characters
- */
-static inline unsigned long
-utf16_strsize(efi_char16_t *data, unsigned long maxlength)
-{
-       return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
-}
-
-static inline int
-utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
-{
-       while (1) {
-               if (len == 0)
-                       return 0;
-               if (*a < *b)
-                       return -1;
-               if (*a > *b)
-                       return 1;
-               if (*a == 0) /* implies *b == 0 */
-                       return 0;
-               a++;
-               b++;
-               len--;
-       }
-}
-
 static bool
 validate_device_path(struct efi_variable *var, int match, u8 *buffer,
                     unsigned long len)
@@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
        u16 filepathlength;
        int i, desclength = 0, namelen;
 
-       namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+       namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
 
        /* Either "Boot" or "Driver" followed by four digits of hex */
        for (i = match; i < match+4; i++) {
@@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
         * There's no stored length for the description, so it has to be
         * found by hand
         */
-       desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+       desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
 
        /* Each boot entry must have a descriptor */
        if (!desclength)
@@ -436,24 +392,12 @@ static efi_status_t
 check_var_size_locked(struct efivars *efivars, u32 attributes,
                        unsigned long size)
 {
-       u64 storage_size, remaining_size, max_size;
-       efi_status_t status;
        const struct efivar_operations *fops = efivars->ops;
 
-       if (!efivars->ops->query_variable_info)
+       if (!efivars->ops->query_variable_store)
                return EFI_UNSUPPORTED;
 
-       status = fops->query_variable_info(attributes, &storage_size,
-                                          &remaining_size, &max_size);
-
-       if (status != EFI_SUCCESS)
-               return status;
-
-       if (!storage_size || size > remaining_size || size > max_size ||
-           (remaining_size - size) < (storage_size / 2))
-               return EFI_OUT_OF_RESOURCES;
-
-       return status;
+       return fops->query_variable_store(attributes, size);
 }
 
 
@@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        spin_lock_irq(&efivars->lock);
 
        status = check_var_size_locked(efivars, new_var->Attributes,
-              new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+              new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
 
        if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
                status = efivars->ops->set_variable(new_var->VariableName,
@@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file,
         * QueryVariableInfo() isn't supported by the firmware.
         */
 
-       varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
+       varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
        status = check_var_size(efivars, attributes, varsize);
 
        if (status != EFI_SUCCESS) {
@@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 
                inode = NULL;
 
-               len = utf16_strlen(entry->var.VariableName);
+               len = ucs2_strlen(entry->var.VariableName);
 
                /* name, plus '-', plus GUID, plus NUL*/
                name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
@@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 
                if (efi_guidcmp(entry->var.VendorGuid, vendor))
                        continue;
-               if (utf16_strncmp(entry->var.VariableName, efi_name,
-                                 utf16_strlen(efi_name))) {
+               if (ucs2_strncmp(entry->var.VariableName, efi_name,
+                                 ucs2_strlen(efi_name))) {
                        /*
                         * Check if an old format,
                         * which doesn't support holding
@@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
                        for (i = 0; i < DUMP_NAME_LEN; i++)
                                efi_name_old[i] = name_old[i];
 
-                       if (utf16_strncmp(entry->var.VariableName, efi_name_old,
-                                         utf16_strlen(efi_name_old)))
+                       if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
+                                         ucs2_strlen(efi_name_old)))
                                continue;
                }
 
@@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
         * Does this variable already exist?
         */
        list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
-               strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
-               strsize2 = utf16_strsize(new_var->VariableName, 1024);
+               strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
+               strsize2 = ucs2_strsize(new_var->VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(&(search_efivar->var.VariableName),
                                new_var->VariableName, strsize1) &&
@@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
        }
 
        status = check_var_size_locked(efivars, new_var->Attributes,
-              new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+              new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
 
        if (status && status != EFI_UNSUPPORTED) {
                spin_unlock_irq(&efivars->lock);
@@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 
        /* Create the entry in sysfs.  Locking is not required here */
        status = efivar_create_sysfs_entry(efivars,
-                                          utf16_strsize(new_var->VariableName,
+                                          ucs2_strsize(new_var->VariableName,
                                                         1024),
                                           new_var->VariableName,
                                           &new_var->VendorGuid);
@@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
         * Does this variable already exist?
         */
        list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
-               strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
-               strsize2 = utf16_strsize(del_var->VariableName, 1024);
+               strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
+               strsize2 = ucs2_strsize(del_var->VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(&(search_efivar->var.VariableName),
                                del_var->VariableName, strsize1) &&
@@ -1691,9 +1635,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
        unsigned long strsize1, strsize2;
        bool found = false;
 
-       strsize1 = utf16_strsize(variable_name, 1024);
+       strsize1 = ucs2_strsize(variable_name, 1024);
        list_for_each_entry_safe(entry, n, &efivars->list, list) {
-               strsize2 = utf16_strsize(entry->var.VariableName, 1024);
+               strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(variable_name, &(entry->var.VariableName),
                                strsize2) &&
@@ -2131,7 +2075,7 @@ efivars_init(void)
        ops.get_variable = efi.get_variable;
        ops.set_variable = efi.set_variable;
        ops.get_next_variable = efi.get_next_variable;
-       ops.query_variable_info = efi.query_variable_info;
+       ops.query_variable_store = efi_query_variable_store;
 
        error = register_efivars(&__efivars, &ops, efi_kobj);
        if (error)
index 5d66750..1a38dd7 100644 (file)
@@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
        ICPU(0x3c, idle_cpu_hsw),
        ICPU(0x3f, idle_cpu_hsw),
        ICPU(0x45, idle_cpu_hsw),
+       ICPU(0x46, idle_cpu_hsw),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
index 1daa979..0bfd8cf 100644 (file)
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x802: /* Intuos4 General Pen */
                case 0x804: /* Intuos4 Marker Pen */
                case 0x40802: /* Intuos4 Classic Pen */
-               case 0x18803: /* DTH2242 Grip Pen */
+               case 0x18802: /* DTH2242 Grip Pen */
                case 0x022:
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
        { "Wacom Intuos4 12x19",  WACOM_PKGLEN_INTUOS,    97536, 60960, 2047,
          63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xBC =
-       { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40840, 25400, 2047,
+       { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40640, 25400, 2047,
          63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x26 =
        { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS,  31496, 19685, 2047,
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x44) },
        { USB_DEVICE_WACOM(0x45) },
        { USB_DEVICE_WACOM(0x59) },
-       { USB_DEVICE_WACOM(0x5D) },
+       { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xB0) },
        { USB_DEVICE_WACOM(0xB1) },
        { USB_DEVICE_WACOM(0xB2) },
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
        { USB_DEVICE_WACOM(0xF8) },
-       { USB_DEVICE_WACOM(0xF6) },
+       { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xFA) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index a32e0d5..fc6aebf 100644 (file)
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);
 
-       return -ENXIO;
+       /* the genirq layer expects 0 if we can't retrigger in hardware */
+       return 0;
 }
 
 #ifdef CONFIG_SMP
index 7e46926..9a0bdad 100644 (file)
@@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error)
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
+                       trace_block_bio_complete(md->queue, bio, io_error);
                        bio_endio(bio, io_error);
                }
        }
index 24909eb..f4e87bf 100644 (file)
@@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi)
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
+               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+                                        bi, 0);
                bio_endio(bi, 0);
                bi = return_bi;
        }
@@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error)
        rdev_dec_pending(rdev, conf->mddev);
 
        if (!error && uptodate) {
+               trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+                                        raid_bi, 0);
                bio_endio(raid_bi, 0);
                if (atomic_dec_and_test(&conf->active_aligned_reads))
                        wake_up(&conf->wait_for_stripe);
@@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                if ( rw == WRITE )
                        md_write_end(mddev);
 
+               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+                                        bi, 0);
                bio_endio(bi, 0);
        }
 }
@@ -4758,8 +4764,11 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
                handled++;
        }
        remaining = raid5_dec_bi_active_stripes(raid_bio);
-       if (remaining == 0)
+       if (remaining == 0) {
+               trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+                                        raid_bio, 0);
                bio_endio(raid_bio, 0);
+       }
        if (atomic_dec_and_test(&conf->active_aligned_reads))
                wake_up(&conf->wait_for_stripe);
        return handled;
index 92ab30a..dc571eb 100644 (file)
@@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
 }
 #endif
 
-static inline unsigned long get_vm_size(struct vm_area_struct *vma)
-{
-       return vma->vm_end - vma->vm_start;
-}
-
-static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
-{
-       return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
-}
-
-/*
- * Set a new vm offset.
- *
- * Verify that the incoming offset really works as a page offset,
- * and that the offset and size fit in a resource_size_t.
- */
-static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
-{
-       pgoff_t pgoff = off >> PAGE_SHIFT;
-       if (off != (resource_size_t) pgoff << PAGE_SHIFT)
-               return -EINVAL;
-       if (off + get_vm_size(vma) - 1 < off)
-               return -EINVAL;
-       vma->vm_pgoff = pgoff;
-       return 0;
-}
-
 /*
  * set up a mapping for shared memory segments
  */
@@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        struct map_info *map = mtd->priv;
-       resource_size_t start, off;
-       unsigned long len, vma_len;
 
         /* This is broken because it assumes the MTD device is map-based
           and that mtd->priv is a valid struct map_info.  It should be
           replaced with something that uses the mtd_get_unmapped_area()
           operation properly. */
        if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
-               off = get_vm_offset(vma);
-               start = map->phys;
-               len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
-               start &= PAGE_MASK;
-               vma_len = get_vm_size(vma);
-
-               /* Overflow in off+len? */
-               if (vma_len + off < off)
-                       return -EINVAL;
-               /* Does it fit in the mapping? */
-               if (vma_len + off > len)
-                       return -EINVAL;
-
-               off += start;
-               /* Did that overflow? */
-               if (off < start)
-                       return -EINVAL;
-               if (set_vm_offset(vma, off) < 0)
-                       return -EINVAL;
-               vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
 #ifdef pgprot_noncached
-               if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+               if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
-               if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                                      vma->vm_end - vma->vm_start,
-                                      vma->vm_page_prot))
-                       return -EAGAIN;
-
-               return 0;
+               return vm_iomap_memory(vma, map->phys, map->size);
        }
        return -ENOSYS;
 #else
index 07401a3..dbbea0e 100644 (file)
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(old_active->dev, -1);
 
+               netif_addr_lock_bh(bond->dev);
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_del(old_active->dev, ha->addr);
+               netif_addr_unlock_bh(bond->dev);
        }
 
        if (new_active) {
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(new_active->dev, 1);
 
+               netif_addr_lock_bh(bond->dev);
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_add(new_active->dev, ha->addr);
+               netif_addr_unlock_bh(bond->dev);
        }
 }
 
@@ -1901,11 +1905,29 @@ err_dest_symlinks:
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
 err_detach:
+       if (!USES_PRIMARY(bond->params.mode)) {
+               netif_addr_lock_bh(bond_dev);
+               bond_mc_list_flush(bond_dev, slave_dev);
+               netif_addr_unlock_bh(bond_dev);
+       }
+       bond_del_vlans_from_slave(bond, slave_dev);
        write_lock_bh(&bond->lock);
        bond_detach_slave(bond, new_slave);
+       if (bond->primary_slave == new_slave)
+               bond->primary_slave = NULL;
        write_unlock_bh(&bond->lock);
+       if (bond->curr_active_slave == new_slave) {
+               read_lock(&bond->lock);
+               write_lock_bh(&bond->curr_slave_lock);
+               bond_change_active_slave(bond, NULL);
+               bond_select_active_slave(bond);
+               write_unlock_bh(&bond->curr_slave_lock);
+               read_unlock(&bond->lock);
+       }
+       slave_disable_netpoll(new_slave);
 
 err_close:
+       slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
 err_unset_master:
@@ -3168,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event,
                                   struct net_device *slave_dev)
 {
        struct slave *slave = bond_slave_get_rtnl(slave_dev);
-       struct bonding *bond = slave->bond;
-       struct net_device *bond_dev = slave->bond->dev;
+       struct bonding *bond;
+       struct net_device *bond_dev;
        u32 old_speed;
        u8 old_duplex;
 
+       /* A netdev event can be generated while enslaving a device
+        * before netdev_rx_handler_register is called in which case
+        * before netdev_rx_handler_register is called, in which case
+        */
+       if (!slave)
+               return NOTIFY_DONE;
+       bond_dev = slave->bond->dev;
+       bond = slave->bond;
+
        switch (event) {
        case NETDEV_UNREGISTER:
                if (bond->setup_by_slave)
@@ -3286,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
+       const struct ethhdr *data;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
        u32 v6hash;
-       __be32 *s, *d;
+       const __be32 *s, *d;
 
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_network_may_pull(skb, sizeof(*iph))) {
                iph = ip_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
                v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3318,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
        u32 layer4_xor = 0;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
-       __be32 *s, *d;
-       __be16 *layer4hdr;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
+       const __be32 *s, *d;
+       const __be16 *l4 = NULL;
+       __be16 _l4[2];
+       int noff = skb_network_offset(skb);
+       int poff;
 
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_may_pull(skb, noff + sizeof(*iph))) {
                iph = ip_hdr(skb);
-               if (!ip_is_fragment(iph) &&
-                   (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(iph->protocol);
+
+               if (!ip_is_fragment(iph) && poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
-               if ((ipv6h->nexthdr == IPPROTO_TCP ||
-                    ipv6h->nexthdr == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)(ipv6h + 1);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(ipv6h->nexthdr);
+               if (poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
index f32b9fc..9aa0c64 100644 (file)
@@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net)
        struct mcp251x_priv *priv = netdev_priv(net);
        struct spi_device *spi = priv->spi;
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+       unsigned long flags;
        int ret;
 
        ret = open_candev(net);
@@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net)
        priv->tx_skb = NULL;
        priv->tx_len = 0;
 
+       flags = IRQF_ONESHOT;
+       if (pdata->irq_flags)
+               flags |= pdata->irq_flags;
+       else
+               flags |= IRQF_TRIGGER_FALLING;
+
        ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-                 pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
-                 DEVICE_NAME, priv);
+                                  flags, DEVICE_NAME, priv);
        if (ret) {
                dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
                if (pdata->transceiver_enable)
index 6433b81..8e0c4a0 100644 (file)
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
        struct net_device *dev;
        struct sja1000_priv *priv;
        struct resource res;
-       const u32 *prop;
-       int err, irq, res_size, prop_size;
+       u32 prop;
+       int err, irq, res_size;
        void __iomem *base;
 
        err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
        priv->read_reg = sja1000_ofp_read_reg;
        priv->write_reg = sja1000_ofp_write_reg;
 
-       prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
-       if (prop && (prop_size ==  sizeof(u32)))
-               priv->can.clock.freq = *prop / 2;
+       err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
+       if (!err)
+               priv->can.clock.freq = prop / 2;
        else
                priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
 
-       prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
-       if (prop && (prop_size == sizeof(u32)))
-               priv->ocr |= *prop & OCR_MODE_MASK;
+       err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
+       if (!err)
+               priv->ocr |= prop & OCR_MODE_MASK;
        else
                priv->ocr |= OCR_MODE_NORMAL; /* default */
 
-       prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
-       if (prop && (prop_size == sizeof(u32)))
-               priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+       err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
+       if (!err)
+               priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
        else
                priv->ocr |= OCR_TX0_PULLDOWN; /* default */
 
-       prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
-       if (prop && (prop_size == sizeof(u32)) && *prop) {
-               u32 divider = priv->can.clock.freq * 2 / *prop;
+       err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
+       if (!err && prop) {
+               u32 divider = priv->can.clock.freq * 2 / prop;
 
                if (divider > 1)
                        priv->cdr |= divider / 2 - 1;
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
                priv->cdr |= CDR_CLK_OFF; /* default */
        }
 
-       prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
-       if (!prop)
+       if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
                priv->cdr |= CDR_CBP; /* default */
 
        priv->irq_flags = IRQF_SHARED;
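
The conversion above replaces of_get_property() plus manual size checks with of_property_read_u32() and of_property_read_bool(). A kernel-context sketch of the read-with-default idiom; the property name and fallback value are placeholders, not taken from the patch:

#include <linux/of.h>

/* of_property_read_u32() returns 0 only when the property exists and has
 * the expected size, so a single error check replaces the old pointer and
 * prop_size tests.
 */
static u32 read_clock_or_default(struct device_node *np)
{
        u32 val;

        if (of_property_read_u32(np, "vendor,example-clock-frequency", &val))
                val = 16000000;         /* fall back to a default when absent */

        return val;
}
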
index cab306a..e1d2643 100644 (file)
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
        struct ei_device *ei_local;
        struct ax_device *ax;
        struct resource *irq, *mem, *mem2;
-       resource_size_t mem_size, mem2_size = 0;
+       unsigned long mem_size, mem2_size = 0;
        int ret = 0;
 
        dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
index 4046f97..57619dd 100644 (file)
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                        }
                }
 
+               /* initialize FW coalescing state machines in RAM */
+               bnx2x_update_coalesce(bp);
+
                /* setup the leading queue */
                rc = bnx2x_setup_leading(bp);
                if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
-       u16 flags = REG_RD16(bp, addr);
+       u8 flags = REG_RD8(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
-       REG_WR16(bp, addr, flags);
+       REG_WR8(bp, addr, flags);
        DP(NETIF_MSG_IFUP,
           "port %x fw_sb_id %d sb_index %d disable %d\n",
           port, fw_sb_id, sb_index, disable);
index 8e58da9..c50696b 100644 (file)
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                                REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
                        }
                }
+               if (!CHIP_IS_E1x(bp))
+                       /* block FW from writing to host */
+                       REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
                /* wait until BRB is empty */
                tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
                while (timer_count) {
index 08e54f3..2886c9b 100644 (file)
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
 
        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-               __vlan_put_tag(skb, vlan_tag);
-               skb->vlan_tci = 0;
+               skb = __vlan_put_tag(skb, vlan_tag);
+               if (skb)
+                       skb->vlan_tci = 0;
        }
 
        return skb;
index f292c3a..73195f6 100644 (file)
@@ -1002,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
        } else {
                if (fep->link) {
                        fec_stop(ndev);
+                       fep->link = phy_dev->link;
                        status_change = 1;
                }
        }
index 2515140..ab577a7 100644 (file)
@@ -284,18 +284,10 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-       IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
 };
 
-#define ring_uses_build_skb(ring) \
-       test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
-       set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
-       clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)          \
index 8496adf..64f7529 100644 (file)
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-                                 struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-       (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-        (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-       /* set build_skb flag */
-       if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-               set_ring_build_skb_enabled(rx_ring);
-       else
-               clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *rx_ring = adapter->rx_ring[i];
-               igb_set_rx_buffer_len(adapter, rx_ring);
-               igb_configure_rx_ring(adapter, rx_ring);
-       }
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
        return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-                                          union e1000_adv_rx_desc *rx_desc)
-{
-       struct igb_rx_buffer *rx_buffer;
-       struct sk_buff *skb;
-       struct page *page;
-       void *page_addr;
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-       unsigned int truesize = IGB_RX_BUFSZ;
-#else
-       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(NET_SKB_PAD +
-                                              NET_IP_ALIGN +
-                                              size);
-#endif
-
-       /* If we spanned a buffer we have a huge mess so test for it */
-       BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
-
-       page_addr = page_address(page) + rx_buffer->page_offset;
-
-       /* prefetch first cache line of first page */
-       prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-       prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-       /* build an skb to around the page buffer */
-       skb = build_skb(page_addr, truesize);
-       if (unlikely(!skb)) {
-               rx_ring->rx_stats.alloc_failed++;
-               return NULL;
-       }
-
-       /* we are reusing so sync this buffer for CPU use */
-       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                     rx_buffer->dma,
-                                     rx_buffer->page_offset,
-                                     IGB_RX_BUFSZ,
-                                     DMA_FROM_DEVICE);
-
-       /* update pointers within the skb to store the data */
-       skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-       __skb_put(skb, size);
-
-       /* pull timestamp out of packet data */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-               __skb_pull(skb, IGB_TS_HDR_LEN);
-       }
-
-       if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-               /* hand second half of page back to the ring */
-               igb_reuse_rx_page(rx_ring, rx_buffer);
-       } else {
-               /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       }
-
-       /* clear contents of buffer_info */
-       rx_buffer->dma = 0;
-       rx_buffer->page = NULL;
-
-       return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                           union e1000_adv_rx_desc *rx_desc,
                                           struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                rmb();
 
                /* retrieve a buffer from the ring */
-               if (ring_uses_build_skb(rx_ring))
-                       skb = igb_build_rx_buffer(rx_ring, rx_desc);
-               else
-                       skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+               skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
        return true;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-       if (ring_uses_build_skb(rx_ring))
-               return NET_SKB_PAD + NET_IP_ALIGN;
-       else
-               return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-                                                    bi->page_offset +
-                                                    igb_rx_offset(rx_ring));
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
                rx_desc++;
                bi++;
index d44b4d2..97e3366 100644 (file)
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
                return -EINVAL;
        if (vlan || qos) {
+               if (adapter->vfinfo[vf].pf_vlan)
+                       err = ixgbe_set_vf_vlan(adapter, false,
+                                               adapter->vfinfo[vf].pf_vlan,
+                                               vf);
+               if (err)
+                       goto out;
                err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
                if (err)
                        goto out;
index edfba93..434e33c 100644 (file)
@@ -33,6 +33,7 @@ config MV643XX_ETH
 
 config MVMDIO
        tristate "Marvell MDIO interface support"
+       select PHYLIB
        ---help---
          This driver supports the MDIO interface found in the network
          interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -45,7 +46,6 @@ config MVMDIO
 config MVNETA
        tristate "Marvell Armada 370/XP network interface support"
        depends on MACH_ARMADA_370_XP
-       select PHYLIB
        select MVMDIO
        ---help---
          This driver supports the network interface units in the
index 1e628ce..a47a097 100644 (file)
@@ -374,7 +374,6 @@ static int rxq_number = 8;
 static int txq_number = 8;
 
 static int rxq_def;
-static int txq_def;
 
 #define MVNETA_DRIVER_NAME "mvneta"
 #define MVNETA_DRIVER_VERSION "1.0"
@@ -1475,7 +1474,8 @@ error:
 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+       u16 txq_id = skb_get_queue_mapping(skb);
+       struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
        struct netdev_queue *nq;
        int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                goto out;
 
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq    = netdev_get_tx_queue(dev, txq_def);
+       nq    = netdev_get_tx_queue(dev, txq_id);
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+       dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
        if (!dev)
                return -ENOMEM;
 
@@ -2844,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);
 module_param(txq_number, int, S_IRUGO);
 
 module_param(rxq_def, int, S_IRUGO);
-module_param(txq_def, int, S_IRUGO);
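
With the mvneta hunks above, the transmit queue comes from the skb's queue mapping instead of a module parameter, and the netdev is allocated with alloc_etherdev_mqs() so the stack sees one netdev_queue per hardware queue. A kernel-context sketch of the xmit side of that pattern; the driver ring lookup is elided and nothing here is specific to mvneta:

#include <linux/netdevice.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u16 txq_id = skb_get_queue_mapping(skb);        /* queue chosen by the stack */
        struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

        if (netif_tx_queue_stopped(nq))
                return NETDEV_TX_BUSY;

        /* ... place skb on the driver ring matching txq_id ... */
        return NETDEV_TX_OK;
}
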
index cd5ae88..edd63f1 100644 (file)
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
                }
        } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
 
+       /* Make sure carrier is off and queue is stopped during loopback */
+       if (netif_running(netdev)) {
+               netif_carrier_off(netdev);
+               netif_stop_queue(netdev);
+       }
+
        ret = qlcnic_do_lb_test(adapter, mode);
 
        qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
 {
        struct qlcnic_cmd_args cmd;
+       struct net_device *netdev = adapter->netdev;
        int ret = 0;
 
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_TX, &ret);
        if (ret) {
-               dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+               netdev_err(netdev, "Error getting Tx stats\n");
                goto out;
        }
        /* Get MAC stats */
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_MAC, &ret);
        if (ret) {
-               dev_info(&adapter->pdev->dev,
-                        "Error getting Rx stats\n");
+               netdev_err(netdev, "Error getting MAC stats\n");
                goto out;
        }
        /* Get Rx stats */
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_RX, &ret);
        if (ret)
-               dev_info(&adapter->pdev->dev,
-                        "Error getting Tx stats\n");
+               netdev_err(netdev, "Error getting Rx stats\n");
 out:
        qlcnic_free_mbx_args(&cmd);
 }
index 0e63006..5fa847f 100644 (file)
@@ -358,8 +358,7 @@ set_flags:
                memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
        }
        opcode = TX_ETHER_PKT;
-       if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-           skb_shinfo(skb)->gso_size > 0) {
+       if (skb_is_gso(skb)) {
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                first_desc->total_hdr_length = hdr_len;
index 987fb6f..5ef328a 100644 (file)
@@ -200,10 +200,10 @@ beacon_err:
        }
 
        err = qlcnic_config_led(adapter, b_state, b_rate);
-       if (!err)
+       if (!err) {
                err = len;
-       else
                ahw->beacon_state = b_state;
+       }
 
        if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
                qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
index a131d7b..7e8d682 100644 (file)
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.31"
+#define DRV_VERSION    "v1.00.00.32"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
index 6f316ab..0780e03 100644 (file)
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
 
        ecmd->supported = SUPPORTED_10000baseT_Full;
        ecmd->advertising = ADVERTISED_10000baseT_Full;
-       ecmd->autoneg = AUTONEG_ENABLE;
        ecmd->transceiver = XCVR_EXTERNAL;
        if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
                                STS_LINK_TYPE_10GBASET) {
                ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
                ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
                ecmd->port = PORT_TP;
+               ecmd->autoneg = AUTONEG_ENABLE;
        } else {
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising |= ADVERTISED_FIBRE;
index b13ab54..8033555 100644 (file)
@@ -1434,11 +1434,13 @@ map_error:
 }
 
 /* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+                                struct rx_ring *rx_ring)
 {
        struct nic_stats *stats = &qdev->nic_stats;
 
        stats->rx_err_count++;
+       rx_ring->rx_errors++;
 
        switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
        case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct napi_struct *napi = &rx_ring->napi;
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               put_page(lbq_desc->p.pg_chunk.page);
+               return;
+       }
        napi->dev = qdev->ndev;
 
        skb = napi_get_frags(napi);
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
        addr = lbq_desc->p.pg_chunk.va;
        prefetch(addr);
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               goto err_out;
+       }
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
        memcpy(skb_put(new_skb, length), skb->data, length);
        skb = new_skb;
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
        /* loopback self test for ethtool */
        if (test_bit(QL_SELFTEST, &qdev->flags)) {
                ql_check_lb_frame(qdev, skb);
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
                return;
        }
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
 
        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 
-       /* Frame error, so drop the packet. */
-       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
-               return (unsigned long)length;
-       }
-
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
                /* The data and headers are split into
                 * separate buffers.
index 0c74a70..50617c5 100644 (file)
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
        writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
        writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).
index 80cad06..4781d3d 100644 (file)
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
                if (data->dual_emac) {
-                       if (of_property_read_u32(node, "dual_emac_res_vlan",
+                       if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
                                pr_err("Missing dual_emac_res_vlan in DT.\n");
                                slave_data->dual_emac_res_vlan = i+1;
index b7c457a..729ed53 100644 (file)
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                if (tun->flags & TUN_TAP_MQ &&
                    (tun->numqueues + tun->numdisabled > 1))
-                       return err;
+                       return -EBUSY;
        }
        else {
                char *name;
index 16c8429..6bd9167 100644 (file)
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                goto error;
 
        if (skb) {
-               if (skb->len <= sizeof(ETH_HLEN))
+               if (skb->len <= ETH_HLEN)
                        goto error;
 
                /* mapping VLANs to MBIM sessions:
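
The cdc_mbim fix above matters because sizeof(ETH_HLEN) is the size of an integer constant (typically 4), not the 14-byte Ethernet header length, so undersized frames slipped past the old check. A standalone illustration of the distinction:

#include <stdio.h>

#define ETH_HLEN 14     /* same value as the kernel's <linux/if_ether.h> */

int main(void)
{
        printf("ETH_HLEN = %d, sizeof(ETH_HLEN) = %zu\n",
               ETH_HLEN, sizeof(ETH_HLEN));
        return 0;
}
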
index 968d5d5..2a3579f 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
+#include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/usb/cdc.h>
@@ -52,6 +53,96 @@ struct qmi_wwan_state {
        struct usb_interface *data;
 };
 
+/* default ethernet address used by the modem */
+static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+
+/* Make up an ethernet header if the packet doesn't have one.
+ *
+ * A firmware bug common among several devices causes them to send raw
+ * IP packets under some circumstances.  There is no way for the
+ * driver/host to know when this will happen.  And even when the bug
+ * hits, some packets will still arrive with an intact header.
+ *
+ * The supported devices are only capable of sending IPv4, IPv6 and
+ * ARP packets on a point-to-point link. Any packet with an ethernet
+ * header will have either our address or a broadcast/multicast
+ * address as destination.  ARP packets will always have a header.
+ *
+ * This means that this function will reliably add the appropriate
+ * header iff necessary, provided our hardware address does not start
+ * with 4 or 6.
+ *
+ * Another common firmware bug results in all packets being addressed
+ * to 00:a0:c6:00:00:00 despite the host address being different.
+ * This function will also fixup such packets.
+ */
+static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+       __be16 proto;
+
+       /* usbnet rx_complete guarantees that skb->len is at least
+        * hard_header_len, so we can inspect the dest address without
+        * checking skb->len
+        */
+       switch (skb->data[0] & 0xf0) {
+       case 0x40:
+               proto = htons(ETH_P_IP);
+               break;
+       case 0x60:
+               proto = htons(ETH_P_IPV6);
+               break;
+       case 0x00:
+               if (is_multicast_ether_addr(skb->data))
+                       return 1;
+               /* possibly bogus destination - rewrite just in case */
+               skb_reset_mac_header(skb);
+               goto fix_dest;
+       default:
+               /* pass along other packets without modifications */
+               return 1;
+       }
+       if (skb_headroom(skb) < ETH_HLEN)
+               return 0;
+       skb_push(skb, ETH_HLEN);
+       skb_reset_mac_header(skb);
+       eth_hdr(skb)->h_proto = proto;
+       memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+fix_dest:
+       memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+       return 1;
+}
+
+/* very simplistic detection of IPv4 or IPv6 headers */
+static bool possibly_iphdr(const char *data)
+{
+       return (data[0] & 0xd0) == 0x40;
+}
+
+/* disallow addresses which may be confused with IP headers */
+static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
+{
+       int ret;
+       struct sockaddr *addr = p;
+
+       ret = eth_prepare_mac_addr_change(dev, p);
+       if (ret < 0)
+               return ret;
+       if (possibly_iphdr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       eth_commit_mac_addr_change(dev, p);
+       return 0;
+}
+
+static const struct net_device_ops qmi_wwan_netdev_ops = {
+       .ndo_open               = usbnet_open,
+       .ndo_stop               = usbnet_stop,
+       .ndo_start_xmit         = usbnet_start_xmit,
+       .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_set_mac_address    = qmi_wwan_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
 /* using a counter to merge subdriver requests with our own into a combined state */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
@@ -229,6 +320,18 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
        }
 
+       /* Never use the same address on both ends of the link, even
+        * if the buggy firmware told us to.
+        */
+       if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
+               eth_hw_addr_random(dev->net);
+
+       /* make MAC addr easily distinguishable from an IP header */
+       if (possibly_iphdr(dev->net->dev_addr)) {
+               dev->net->dev_addr[0] |= 0x02;  /* set local assignment bit */
+               dev->net->dev_addr[0] &= 0xbf;  /* clear "IP" bit */
+       }
+       dev->net->netdev_ops = &qmi_wwan_netdev_ops;
 err:
        return status;
 }
@@ -307,6 +410,7 @@ static const struct driver_info     qmi_wwan_info = {
        .bind           = qmi_wwan_bind,
        .unbind         = qmi_wwan_unbind,
        .manage_power   = qmi_wwan_manage_power,
+       .rx_fixup       = qmi_wwan_rx_fixup,
 };
 
 #define HUAWEI_VENDOR_ID       0x12D1
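
The qmi_wwan comment above relies on an IPv4 header starting with nibble 0x4 and an IPv6 header with 0x6; possibly_iphdr() folds both into one mask test, (b & 0xd0) == 0x40. A standalone check of that mask, with sample first bytes chosen for illustration:

#include <stdio.h>
#include <stdbool.h>

static bool possibly_iphdr(unsigned char b)
{
        return (b & 0xd0) == 0x40;
}

int main(void)
{
        unsigned char ipv4 = 0x45;      /* version 4, IHL 5 */
        unsigned char ipv6 = 0x60;      /* version 6, traffic class 0 */
        unsigned char mac  = 0x02;      /* locally administered MAC, not an IP header */

        printf("0x%02x -> %d, 0x%02x -> %d, 0x%02x -> %d\n",
               ipv4, possibly_iphdr(ipv4),
               ipv6, possibly_iphdr(ipv6),
               mac, possibly_iphdr(mac));
        return 0;
}
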
index 28fd992..bdee2ed 100644 (file)
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
        {0x00008258, 0x00000000},
        {0x0000825c, 0x40000000},
        {0x00008260, 0x00080922},
-       {0x00008264, 0x9bc00010},
+       {0x00008264, 0x9d400010},
        {0x00008268, 0xffffffff},
        {0x0000826c, 0x0000ffff},
        {0x00008270, 0x00000000},
index 467b600..73fe8d6 100644 (file)
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
        u32 sz, i;
        struct channel_detector *cd;
 
-       cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+       cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
        if (cd == NULL)
                goto fail;
 
        INIT_LIST_HEAD(&cd->head);
        cd->freq = freq;
        sz = sizeof(cd->detectors) * dpd->num_radar_types;
-       cd->detectors = kzalloc(sz, GFP_KERNEL);
+       cd->detectors = kzalloc(sz, GFP_ATOMIC);
        if (cd->detectors == NULL)
                goto fail;
 
index 91b8dce..5e48c55 100644 (file)
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
 {
        struct pulse_elem *p = pool_get_pulse_elem();
        if (p == NULL) {
-               p = kmalloc(sizeof(*p), GFP_KERNEL);
+               p = kmalloc(sizeof(*p), GFP_ATOMIC);
                if (p == NULL) {
                        DFS_POOL_STAT_INC(pulse_alloc_error);
                        return false;
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
                ps.deadline_ts = ps.first_ts + ps.dur;
                new_ps = pool_get_pseq_elem();
                if (new_ps == NULL) {
-                       new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
+                       new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
                        if (new_ps == NULL) {
                                DFS_POOL_STAT_INC(pseq_alloc_error);
                                return false;
index 716058b..a47f5e0 100644 (file)
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
         * required version.
         */
        if (priv->fw_version_major != MAJOR_VERSION_REQ ||
-           priv->fw_version_minor != MINOR_VERSION_REQ) {
+           priv->fw_version_minor < MINOR_VERSION_REQ) {
                dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
                        MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
                return -EINVAL;
index e8486c1..b70f220 100644 (file)
@@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
 #endif
 #ifdef CONFIG_B43_SSB
        case B43_BUS_SSB:
-               /* FIXME */
+               ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
+                                           avoid);
                break;
 #endif
        }
index ec46fff..78da3ef 100644 (file)
@@ -4126,10 +4126,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
                         BIT(NL80211_IFTYPE_ADHOC) |
                         BIT(NL80211_IFTYPE_AP)
        },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-       },
        {
                .max = 1,
                .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4187,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
                                 BIT(NL80211_IFTYPE_ADHOC) |
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-                                BIT(NL80211_IFTYPE_P2P_GO) |
-                                BIT(NL80211_IFTYPE_P2P_DEVICE);
+                                BIT(NL80211_IFTYPE_P2P_GO);
        wiphy->iface_combinations = brcmf_iface_combos;
        wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
        wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
index c6451c6..e2340b2 100644 (file)
@@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
        }
 }
 
+/**
+ * This function frees the WL per-device resources.
+ *
+ * This function frees resources owned by the WL device pointed to
+ * by the wl parameter.
+ *
+ * precondition: can both be called locked and unlocked
+ *
+ */
+static void brcms_free(struct brcms_info *wl)
+{
+       struct brcms_timer *t, *next;
+
+       /* free ucode data */
+       if (wl->fw.fw_cnt)
+               brcms_ucode_data_free(&wl->ucode);
+       if (wl->irq)
+               free_irq(wl->irq, wl);
+
+       /* kill dpc */
+       tasklet_kill(&wl->tasklet);
+
+       if (wl->pub) {
+               brcms_debugfs_detach(wl->pub);
+               brcms_c_module_unregister(wl->pub, "linux", wl);
+       }
+
+       /* free common resources */
+       if (wl->wlc) {
+               brcms_c_detach(wl->wlc);
+               wl->wlc = NULL;
+               wl->pub = NULL;
+       }
+
+       /* virtual interface deletion is deferred so we cannot spinwait */
+
+       /* wait for all pending callbacks to complete */
+       while (atomic_read(&wl->callbacks) > 0)
+               schedule();
+
+       /* free timers */
+       for (t = wl->timers; t; t = next) {
+               next = t->next;
+#ifdef DEBUG
+               kfree(t->name);
+#endif
+               kfree(t);
+       }
+}
+
+/*
+* called both from the kernel and from this kernel module (error flow on attach)
+* precondition: perimeter lock is not acquired.
+*/
+static void brcms_remove(struct bcma_device *pdev)
+{
+       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+       struct brcms_info *wl = hw->priv;
+
+       if (wl->wlc) {
+               wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+               wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+               ieee80211_unregister_hw(hw);
+       }
+
+       brcms_free(wl);
+
+       bcma_set_drvdata(pdev, NULL);
+       ieee80211_free_hw(hw);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static void brcms_release_fw(struct brcms_info *wl)
+{
+       int i;
+       for (i = 0; i < MAX_FW_IMAGES; i++) {
+               release_firmware(wl->fw.fw_bin[i]);
+               release_firmware(wl->fw.fw_hdr[i]);
+       }
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
+{
+       int status;
+       struct device *device = &pdev->dev;
+       char fw_name[100];
+       int i;
+
+       memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+       for (i = 0; i < MAX_FW_IMAGES; i++) {
+               if (brcms_firmwares[i] == NULL)
+                       break;
+               sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+                       UCODE_LOADER_API_VER);
+               status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+               if (status) {
+                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+                                 KBUILD_MODNAME, fw_name);
+                       return status;
+               }
+               sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+                       UCODE_LOADER_API_VER);
+               status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+               if (status) {
+                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+                                 KBUILD_MODNAME, fw_name);
+                       return status;
+               }
+               wl->fw.hdr_num_entries[i] =
+                   wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+       }
+       wl->fw.fw_cnt = i;
+       status = brcms_ucode_data_init(wl, &wl->ucode);
+       brcms_release_fw(wl);
+       return status;
+}
+
 static void brcms_ops_tx(struct ieee80211_hw *hw,
                         struct ieee80211_tx_control *control,
                         struct sk_buff *skb)
@@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        if (!blocked)
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 
+       if (!wl->ucode.bcm43xx_bomminor) {
+               err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+               if (err) {
+                       brcms_remove(wl->wlc->hw->d11core);
+                       return -ENOENT;
+               }
+       }
+
        spin_lock_bh(&wl->lock);
        /* avoid acknowledging frames before a non-monitor device is added */
        wl->mute_tx = true;
@@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data)
        wake_up(&wl->tx_flush_wq);
 }
 
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
-{
-       int status;
-       struct device *device = &pdev->dev;
-       char fw_name[100];
-       int i;
-
-       memset(&wl->fw, 0, sizeof(struct brcms_firmware));
-       for (i = 0; i < MAX_FW_IMAGES; i++) {
-               if (brcms_firmwares[i] == NULL)
-                       break;
-               sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
-                       UCODE_LOADER_API_VER);
-               status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
-               if (status) {
-                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-                                 KBUILD_MODNAME, fw_name);
-                       return status;
-               }
-               sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
-                       UCODE_LOADER_API_VER);
-               status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
-               if (status) {
-                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-                                 KBUILD_MODNAME, fw_name);
-                       return status;
-               }
-               wl->fw.hdr_num_entries[i] =
-                   wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
-       }
-       wl->fw.fw_cnt = i;
-       return brcms_ucode_data_init(wl, &wl->ucode);
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static void brcms_release_fw(struct brcms_info *wl)
-{
-       int i;
-       for (i = 0; i < MAX_FW_IMAGES; i++) {
-               release_firmware(wl->fw.fw_bin[i]);
-               release_firmware(wl->fw.fw_hdr[i]);
-       }
-}
-
-/**
- * This function frees the WL per-device resources.
- *
- * This function frees resources owned by the WL device pointed to
- * by the wl parameter.
- *
- * precondition: can both be called locked and unlocked
- *
- */
-static void brcms_free(struct brcms_info *wl)
-{
-       struct brcms_timer *t, *next;
-
-       /* free ucode data */
-       if (wl->fw.fw_cnt)
-               brcms_ucode_data_free(&wl->ucode);
-       if (wl->irq)
-               free_irq(wl->irq, wl);
-
-       /* kill dpc */
-       tasklet_kill(&wl->tasklet);
-
-       if (wl->pub) {
-               brcms_debugfs_detach(wl->pub);
-               brcms_c_module_unregister(wl->pub, "linux", wl);
-       }
-
-       /* free common resources */
-       if (wl->wlc) {
-               brcms_c_detach(wl->wlc);
-               wl->wlc = NULL;
-               wl->pub = NULL;
-       }
-
-       /* virtual interface deletion is deferred so we cannot spinwait */
-
-       /* wait for all pending callbacks to complete */
-       while (atomic_read(&wl->callbacks) > 0)
-               schedule();
-
-       /* free timers */
-       for (t = wl->timers; t; t = next) {
-               next = t->next;
-#ifdef DEBUG
-               kfree(t->name);
-#endif
-               kfree(t);
-       }
-}
-
-/*
-* called from both kernel as from this kernel module (error flow on attach)
-* precondition: perimeter lock is not acquired.
-*/
-static void brcms_remove(struct bcma_device *pdev)
-{
-       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
-       struct brcms_info *wl = hw->priv;
-
-       if (wl->wlc) {
-               wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
-               wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
-               ieee80211_unregister_hw(hw);
-       }
-
-       brcms_free(wl);
-
-       bcma_set_drvdata(pdev, NULL);
-       ieee80211_free_hw(hw);
-}
-
 static irqreturn_t brcms_isr(int irq, void *dev_id)
 {
        struct brcms_info *wl;
@@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
        spin_lock_init(&wl->lock);
        spin_lock_init(&wl->isr_lock);
 
-       /* prepare ucode */
-       if (brcms_request_fw(wl, pdev) < 0) {
-               wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
-                         "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
-               brcms_release_fw(wl);
-               brcms_remove(pdev);
-               return NULL;
-       }
-
        /* common load-time initialization */
        wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
-       brcms_release_fw(wl);
        if (!wl->wlc) {
                wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
                          KBUILD_MODNAME, err);
index 45cacf7..1a779bb 100644 (file)
@@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = {
        { KE_KEY, 0x2142, { KEY_MEDIA } },
        { KE_KEY, 0x213b, { KEY_INFO } },
        { KE_KEY, 0x2169, { KEY_DIRECTION } },
-       { KE_KEY, 0x216a, { KEY_SETUP } },
        { KE_KEY, 0x231b, { KEY_HELP } },
        { KE_END, 0 }
 };
@@ -925,9 +924,6 @@ static int __init hp_wmi_init(void)
                err = hp_wmi_input_setup();
                if (err)
                        return err;
-               
-               //Enable magic for hotkeys that run on the SMBus
-               ec_write(0xe6,0x6e);
        }
 
        if (bios_capable) {
index 9a90756..edec135 100644 (file)
@@ -1964,9 +1964,6 @@ struct tp_nvram_state {
 /* kthread for the hotkey poller */
 static struct task_struct *tpacpi_hotkey_task;
 
-/* Acquired while the poller kthread is running, use to sync start/stop */
-static struct mutex hotkey_thread_mutex;
-
 /*
  * Acquire mutex to write poller control variables as an
  * atomic block.
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data)
        unsigned int poll_freq;
        bool was_frozen;
 
-       mutex_lock(&hotkey_thread_mutex);
-
        if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
                goto exit;
 
@@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data)
        }
 
 exit:
-       mutex_unlock(&hotkey_thread_mutex);
        return 0;
 }
 
@@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void)
        if (tpacpi_hotkey_task) {
                kthread_stop(tpacpi_hotkey_task);
                tpacpi_hotkey_task = NULL;
-               mutex_lock(&hotkey_thread_mutex);
-               /* at this point, the thread did exit */
-               mutex_unlock(&hotkey_thread_mutex);
        }
 }
 
@@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        mutex_init(&hotkey_mutex);
 
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-       mutex_init(&hotkey_thread_mutex);
        mutex_init(&hotkey_thread_data_mutex);
 #endif
 
index 1a9d1e3..c1441ed 100644 (file)
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
+static void reset_one_i2c(struct bbc_i2c_bus *bp)
 {
        writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0);
        writeb(bp->own, bp->i2c_control_regs + 0x1);
@@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
        writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);
 }
 
-static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index)
+static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index)
 {
        struct bbc_i2c_bus *bp;
        struct device_node *dp;
index 4c0f6d8..7b0bce9 100644 (file)
@@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
                return 0;
        }
 }
+
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
+{
+       u32 pmu_ctl = 0;
+
+       switch (cc->dev->bus->chip_id) {
+       case 0x4322:
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
+               if (spuravoid == 1)
+                       ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
+               else
+                       ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
+               pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
+               break;
+       case 43222:
+               /* TODO: BCM43222 requires updating PLLs too */
+               return;
+       default:
+               ssb_printk(KERN_ERR PFX
+                          "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+                          cc->dev->bus->chip_id);
+               return;
+       }
+
+       chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
+}
+EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
index 7c25408..86291dc 100644 (file)
@@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
        struct fb_info *info = file_fb_info(file);
        struct fb_ops *fb;
-       unsigned long off;
+       unsigned long mmio_pgoff;
        unsigned long start;
        u32 len;
 
        if (!info)
                return -ENODEV;
-       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-               return -EINVAL;
-       off = vma->vm_pgoff << PAGE_SHIFT;
        fb = info->fbops;
        if (!fb)
                return -ENODEV;
@@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
                return res;
        }
 
-       /* frame buffer memory */
+       /*
+        * Ugh. This can be either the frame buffer mapping, or
+        * if pgoff points past it, the mmio mapping.
+        */
        start = info->fix.smem_start;
-       len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
-       if (off >= len) {
-               /* memory mapped io */
-               off -= len;
-               if (info->var.accel_flags) {
-                       mutex_unlock(&info->mm_lock);
-                       return -EINVAL;
-               }
+       len = info->fix.smem_len;
+       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+       if (vma->vm_pgoff >= mmio_pgoff) {
+               vma->vm_pgoff -= mmio_pgoff;
                start = info->fix.mmio_start;
-               len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
+               len = info->fix.mmio_len;
        }
        mutex_unlock(&info->mm_lock);
-       start &= PAGE_MASK;
-       if ((vma->vm_end - vma->vm_start + off) > len)
-               return -EINVAL;
-       off += start;
-       vma->vm_pgoff = off >> PAGE_SHIFT;
-       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
+
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       fb_pgprotect(file, vma, off);
-       if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                            vma->vm_end - vma->vm_start, vma->vm_page_prot))
-               return -EAGAIN;
-       return 0;
+       fb_pgprotect(file, vma, start);
+
+       return vm_iomap_memory(vma, start, len);
 }
 
 static int
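
The fb_mmap() rewrite above drops the hand-rolled offset and length checks and lets vm_iomap_memory(), whose declaration is added by a later hunk in this merge, validate vma->vm_pgoff and the requested size before remapping. A kernel-context sketch of the resulting mmap-handler shape; struct my_dev and its fields are hypothetical stand-ins for a driver's private data:

#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
        phys_addr_t phys_base;          /* start of the device aperture */
        unsigned long region_len;       /* length of the aperture in bytes */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *dev = file->private_data;

        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        /* checks pgoff/length against the region and remaps, or fails cleanly */
        return vm_iomap_memory(vma, dev->phys_base, dev->region_len);
}
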
index 9ed8341..84de263 100644 (file)
@@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path)
 
        kfree(path);
        mutex_unlock(&disp_lock);
-
-       dev_info(path->dev, "de-register %s\n", path->name);
 }
 EXPORT_SYMBOL_GPL(mmp_unregister_path);
index 3939829..86af964 100644 (file)
@@ -1137,6 +1137,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
                        goto whole;
+               return 0;
        }
 
        /* Do not dump I/O mapped devices or special mappings */
index bb5768f..b96fc6c 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1428,8 +1428,6 @@ void bio_endio(struct bio *bio, int error)
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;
 
-       trace_block_bio_complete(bio, error);
-
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
index a94f0f7..fe0a762 100644 (file)
@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode)
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
-               u32 size = inode->i_size;
+               loff_t size = inode->i_size;
 
                res = pagecache_write_begin(NULL, mapping, size, 0,
                                                AOP_FLAG_UNINTERRUPTIBLE,
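
The hfsplus change above widens the local size variable from u32 to loff_t; with a u32, an i_size of 4 GiB or more wraps before it ever reaches pagecache_write_begin(). A standalone illustration of the wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t i_size = 5LL * 1024 * 1024 * 1024;      /* 5 GiB file */
        uint32_t as_u32 = i_size;                       /* old declaration: wraps */
        int64_t as_loff = i_size;                       /* fixed declaration */

        printf("u32: %u, loff_t: %lld\n", as_u32, (long long)as_loff);
        return 0;
}
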
index 84e3d85..523464e 100644 (file)
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
-       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
index f7ed9ee..cbd0f1b 100644 (file)
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
        "x (dead)",             /*  64 */
        "K (wakekill)",         /* 128 */
        "W (waking)",           /* 256 */
+       "P (parked)",           /* 512 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
index 0ea61e0..7c2e030 100644 (file)
@@ -12,7 +12,6 @@
 
 struct blk_trace {
        int trace_state;
-       bool rq_based;
        struct rchan *rchan;
        unsigned long __percpu *sequence;
        unsigned char __percpu *msg_data;
index 9bf2f1f..3d7df3d 100644 (file)
@@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
                                              unsigned long count,
                                              u64 *max_size,
                                              int *reset_type);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
 
 /*
  *  EFI Configuration Table and GUID definitions
@@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
 #ifdef CONFIG_X86
 extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
+extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
 #else
 static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
+
+static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+       return EFI_SUCCESS;
+}
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 extern u64 efi_get_iobase (void);
@@ -731,7 +738,7 @@ struct efivar_operations {
        efi_get_variable_t *get_variable;
        efi_get_next_variable_t *get_next_variable;
        efi_set_variable_t *set_variable;
-       efi_query_variable_info_t *query_variable_info;
+       efi_query_variable_store_t *query_variable_store;
 };
 
 struct efivars {
index d2e6927..d78d28a 100644 (file)
@@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size;
 
 int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
                unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+               unsigned long long *crash_size, unsigned long long *crash_base);
 int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
                unsigned long long *crash_size, unsigned long long *crash_base);
 int crash_shrink_memory(unsigned long new_size);
index e19ff30..e2091b8 100644 (file)
@@ -1611,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+
 
 struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int foll_flags,
index 01d25e6..0214c4c 100644 (file)
@@ -291,6 +291,7 @@ ip_set_hash_destroy(struct ip_set *set)
 #define type_pf_data_tlist     TOKEN(TYPE, PF, _data_tlist)
 #define type_pf_data_next      TOKEN(TYPE, PF, _data_next)
 #define type_pf_data_flags     TOKEN(TYPE, PF, _data_flags)
+#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags)
 #ifdef IP_SET_HASH_WITH_NETS
 #define type_pf_data_match     TOKEN(TYPE, PF, _data_match)
 #else
@@ -385,9 +386,9 @@ type_pf_resize(struct ip_set *set, bool retried)
        struct ip_set_hash *h = set->data;
        struct htable *t, *orig = h->table;
        u8 htable_bits = orig->htable_bits;
-       const struct type_pf_elem *data;
+       struct type_pf_elem *data;
        struct hbucket *n, *m;
-       u32 i, j;
+       u32 i, j, flags = 0;
        int ret;
 
 retry:
@@ -412,9 +413,16 @@ retry:
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
                        data = ahash_data(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+                       flags = 0;
+                       type_pf_data_reset_flags(data, &flags);
+#endif
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
-                       ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
+                       ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags);
                        if (ret < 0) {
+#ifdef IP_SET_HASH_WITH_NETS
+                               type_pf_data_flags(data, flags);
+#endif
                                read_unlock_bh(&set->lock);
                                ahash_destroy(t);
                                if (ret == -EAGAIN)
@@ -836,9 +844,9 @@ type_pf_tresize(struct ip_set *set, bool retried)
        struct ip_set_hash *h = set->data;
        struct htable *t, *orig = h->table;
        u8 htable_bits = orig->htable_bits;
-       const struct type_pf_elem *data;
+       struct type_pf_elem *data;
        struct hbucket *n, *m;
-       u32 i, j;
+       u32 i, j, flags = 0;
        int ret;
 
        /* Try to cleanup once */
@@ -873,10 +881,17 @@ retry:
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
                        data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+                       flags = 0;
+                       type_pf_data_reset_flags(data, &flags);
+#endif
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
-                       ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
-                                               ip_set_timeout_get(type_pf_data_timeout(data)));
+                       ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags,
+                               ip_set_timeout_get(type_pf_data_timeout(data)));
                        if (ret < 0) {
+#ifdef IP_SET_HASH_WITH_NETS
+                               type_pf_data_flags(data, flags);
+#endif
                                read_unlock_bh(&set->lock);
                                ahash_destroy(t);
                                if (ret == -EAGAIN)
@@ -1187,6 +1202,7 @@ type_pf_gc_init(struct ip_set *set)
 #undef type_pf_data_tlist
 #undef type_pf_data_next
 #undef type_pf_data_flags
+#undef type_pf_data_reset_flags
 #undef type_pf_data_match
 
 #undef type_pf_elem
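The pattern both resize paths above now follow, sketched outside the macro machinery (names as defined by the TOKEN() macros; not a drop-in implementation): the element's internal flags, i.e. the nomatch bit used with IP_SET_HASH_WITH_NETS, are stripped and remembered before the element is re-added into the new table, then restored if that insertion fails, so an aborted or retried resize does not lose them:

	flags = 0;
	type_pf_data_reset_flags(data, &flags);		/* clear and remember flags */
	ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags);
	if (ret < 0)
		type_pf_data_flags(data, flags);	/* put them back on failure */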
index d35d2b6..e692a02 100644 (file)
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD              64
 #define TASK_WAKEKILL          128
 #define TASK_WAKING            256
-#define TASK_STATE_MAX         512
+#define TASK_PARKED            512
+#define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
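A quick arithmetic check (userspace sketch only, not part of the change) that the build-time assertion above still holds after adding TASK_PARKED: ilog2(1024) + 1 == 11, and the state string now has 11 characters including the new 'P', so the negative-array-size trick does not trigger:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* 10 == ilog2(TASK_STATE_MAX) for TASK_STATE_MAX == 1024 */
		printf("%zu == %d\n", strlen("RSDTtZXxKWP"), 10 + 1);	/* 11 == 11 */
		return 0;
	}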
index 9e492be..6fcfe99 100644 (file)
 #define SSB_CHIPCO_PMU_CTL                     0x0600 /* PMU control */
 #define  SSB_CHIPCO_PMU_CTL_ILP_DIV            0xFFFF0000 /* ILP div mask */
 #define  SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT      16
+#define  SSB_CHIPCO_PMU_CTL_PLL_UPD            0x00000400
 #define  SSB_CHIPCO_PMU_CTL_NOILPONW           0x00000200 /* No ILP on wait */
 #define  SSB_CHIPCO_PMU_CTL_HTREQEN            0x00000100 /* HT req enable */
 #define  SSB_CHIPCO_PMU_CTL_ALPREQEN           0x00000080 /* ALP req enable */
@@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id {
 void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
                             enum ssb_pmu_ldo_volt_id id, u32 voltage);
 void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
 
 #endif /* LINUX_SSB_CHIPCO_H_ */
index 2de42f9..a5ffd32 100644 (file)
@@ -25,6 +25,7 @@ extern int swiotlb_force;
 extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
+unsigned long swiotlb_size_or_default(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 
 /*
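Illustrative use of the new swiotlb_size_or_default() helper (the surrounding reservation logic here is a sketch, not the in-tree code): early setup code can ask how much memory the bounce buffer will want before swiotlb_init() runs, for example when sizing a default low crashkernel reservation:

	/* Hypothetical sizing: swiotlb bounce buffer plus some slack. */
	unsigned long long low_size = swiotlb_size_or_default() + (8ULL << 20);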
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
new file mode 100644 (file)
index 0000000..cbb20af
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef _LINUX_UCS2_STRING_H_
+#define _LINUX_UCS2_STRING_H_
+
+#include <linux/types.h>       /* for size_t */
+#include <linux/stddef.h>      /* for NULL */
+
+typedef u16 ucs2_char_t;
+
+unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
+unsigned long ucs2_strlen(const ucs2_char_t *s);
+unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+
+#endif /* _LINUX_UCS2_STRING_H_ */
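A hedged example of how a caller (for instance the efivars code this header supports) might use the new UCS-2 helpers to compare variable names; foo_name_matches() and its parameters are illustrative only:

	#include <linux/types.h>
	#include <linux/ucs2_string.h>

	static bool foo_name_matches(const ucs2_char_t *name, const ucs2_char_t *match)
	{
		unsigned long len = ucs2_strlen(match);

		/* Same length and same contents, counted in 16-bit characters. */
		return ucs2_strnlen(name, len + 1) == len &&
		       ucs2_strncmp(name, match, len) == 0;
	}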
index 40be2a0..84a6440 100644 (file)
@@ -199,6 +199,7 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 /* Device notifier */
 extern int register_inet6addr_notifier(struct notifier_block *nb);
 extern int unregister_inet6addr_notifier(struct notifier_block *nb);
+extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
 
 extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
                                         struct ipv6_devconf *devconf);
index f741091..f132924 100644 (file)
@@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
        return (self && self->lap) ? self->lap->daddr : 0;
 }
 
-extern const char *irlmp_reasons[];
+const char *irlmp_reason_str(LM_REASON reason);
+
 extern int sysctl_discovery_timeout;
 extern int sysctl_discovery_slots;
 extern int sysctl_discovery;
index 975cca0..b117081 100644 (file)
@@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
        scm->pid  = get_pid(pid);
        scm->cred = cred ? get_cred(cred) : NULL;
        scm->creds.pid = pid_vnr(pid);
-       scm->creds.uid = cred ? cred->euid : INVALID_UID;
-       scm->creds.gid = cred ? cred->egid : INVALID_GID;
+       scm->creds.uid = cred ? cred->uid : INVALID_UID;
+       scm->creds.gid = cred ? cred->gid : INVALID_GID;
 }
 
 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
index 9961726..9c14673 100644 (file)
@@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce,
 
 /**
  * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
  * @bio: block operation completed
  * @error: io error value
  *
@@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-       TP_PROTO(struct bio *bio, int error),
+       TP_PROTO(struct request_queue *q, struct bio *bio, int error),
 
-       TP_ARGS(bio, error),
+       TP_ARGS(q, bio, error),
 
        TP_STRUCT__entry(
                __field( dev_t,         dev             )
@@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev ?
-                                         bio->bi_bdev->bd_dev : 0;
+               __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio->bi_size >> 9;
                __entry->error          = error;
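With the request_queue added to the prototype, completion paths are expected to pass the queue the bio belongs to. An illustrative caller only (actual in-tree call sites may obtain the queue differently):

	static void foo_bio_endio(struct request_queue *q, struct bio *bio, int error)
	{
		/* Hypothetical completion path that already has the queue in hand. */
		trace_block_bio_complete(q, bio, error);
	}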
index 5a8671e..e5586ca 100644 (file)
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
-                               { 128, "W" }) : "R",
+                               { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
index 4c43b44..706d035 100644 (file)
 #ifndef _LINUX_FUSE_H
 #define _LINUX_FUSE_H
 
-#ifdef __linux__
+#ifdef __KERNEL__
 #include <linux/types.h>
 #else
 #include <stdint.h>
-#define __u64 uint64_t
-#define __s64 int64_t
-#define __u32 uint32_t
-#define __s32 int32_t
-#define __u16 uint16_t
 #endif
 
 /*
    userspace works under 64bit kernels */
 
 struct fuse_attr {
-       __u64   ino;
-       __u64   size;
-       __u64   blocks;
-       __u64   atime;
-       __u64   mtime;
-       __u64   ctime;
-       __u32   atimensec;
-       __u32   mtimensec;
-       __u32   ctimensec;
-       __u32   mode;
-       __u32   nlink;
-       __u32   uid;
-       __u32   gid;
-       __u32   rdev;
-       __u32   blksize;
-       __u32   padding;
+       uint64_t        ino;
+       uint64_t        size;
+       uint64_t        blocks;
+       uint64_t        atime;
+       uint64_t        mtime;
+       uint64_t        ctime;
+       uint32_t        atimensec;
+       uint32_t        mtimensec;
+       uint32_t        ctimensec;
+       uint32_t        mode;
+       uint32_t        nlink;
+       uint32_t        uid;
+       uint32_t        gid;
+       uint32_t        rdev;
+       uint32_t        blksize;
+       uint32_t        padding;
 };
 
 struct fuse_kstatfs {
-       __u64   blocks;
-       __u64   bfree;
-       __u64   bavail;
-       __u64   files;
-       __u64   ffree;
-       __u32   bsize;
-       __u32   namelen;
-       __u32   frsize;
-       __u32   padding;
-       __u32   spare[6];
+       uint64_t        blocks;
+       uint64_t        bfree;
+       uint64_t        bavail;
+       uint64_t        files;
+       uint64_t        ffree;
+       uint32_t        bsize;
+       uint32_t        namelen;
+       uint32_t        frsize;
+       uint32_t        padding;
+       uint32_t        spare[6];
 };
 
 struct fuse_file_lock {
-       __u64   start;
-       __u64   end;
-       __u32   type;
-       __u32   pid; /* tgid */
+       uint64_t        start;
+       uint64_t        end;
+       uint32_t        type;
+       uint32_t        pid; /* tgid */
 };
 
 /**
@@ -364,143 +359,143 @@ enum fuse_notify_code {
 #define FUSE_COMPAT_ENTRY_OUT_SIZE 120
 
 struct fuse_entry_out {
-       __u64   nodeid;         /* Inode ID */
-       __u64   generation;     /* Inode generation: nodeid:gen must
-                                  be unique for the fs's lifetime */
-       __u64   entry_valid;    /* Cache timeout for the name */
-       __u64   attr_valid;     /* Cache timeout for the attributes */
-       __u32   entry_valid_nsec;
-       __u32   attr_valid_nsec;
+       uint64_t        nodeid;         /* Inode ID */
+       uint64_t        generation;     /* Inode generation: nodeid:gen must
+                                          be unique for the fs's lifetime */
+       uint64_t        entry_valid;    /* Cache timeout for the name */
+       uint64_t        attr_valid;     /* Cache timeout for the attributes */
+       uint32_t        entry_valid_nsec;
+       uint32_t        attr_valid_nsec;
        struct fuse_attr attr;
 };
 
 struct fuse_forget_in {
-       __u64   nlookup;
+       uint64_t        nlookup;
 };
 
 struct fuse_forget_one {
-       __u64   nodeid;
-       __u64   nlookup;
+       uint64_t        nodeid;
+       uint64_t        nlookup;
 };
 
 struct fuse_batch_forget_in {
-       __u32   count;
-       __u32   dummy;
+       uint32_t        count;
+       uint32_t        dummy;
 };
 
 struct fuse_getattr_in {
-       __u32   getattr_flags;
-       __u32   dummy;
-       __u64   fh;
+       uint32_t        getattr_flags;
+       uint32_t        dummy;
+       uint64_t        fh;
 };
 
 #define FUSE_COMPAT_ATTR_OUT_SIZE 96
 
 struct fuse_attr_out {
-       __u64   attr_valid;     /* Cache timeout for the attributes */
-       __u32   attr_valid_nsec;
-       __u32   dummy;
+       uint64_t        attr_valid;     /* Cache timeout for the attributes */
+       uint32_t        attr_valid_nsec;
+       uint32_t        dummy;
        struct fuse_attr attr;
 };
 
 #define FUSE_COMPAT_MKNOD_IN_SIZE 8
 
 struct fuse_mknod_in {
-       __u32   mode;
-       __u32   rdev;
-       __u32   umask;
-       __u32   padding;
+       uint32_t        mode;
+       uint32_t        rdev;
+       uint32_t        umask;
+       uint32_t        padding;
 };
 
 struct fuse_mkdir_in {
-       __u32   mode;
-       __u32   umask;
+       uint32_t        mode;
+       uint32_t        umask;
 };
 
 struct fuse_rename_in {
-       __u64   newdir;
+       uint64_t        newdir;
 };
 
 struct fuse_link_in {
-       __u64   oldnodeid;
+       uint64_t        oldnodeid;
 };
 
 struct fuse_setattr_in {
-       __u32   valid;
-       __u32   padding;
-       __u64   fh;
-       __u64   size;
-       __u64   lock_owner;
-       __u64   atime;
-       __u64   mtime;
-       __u64   unused2;
-       __u32   atimensec;
-       __u32   mtimensec;
-       __u32   unused3;
-       __u32   mode;
-       __u32   unused4;
-       __u32   uid;
-       __u32   gid;
-       __u32   unused5;
+       uint32_t        valid;
+       uint32_t        padding;
+       uint64_t        fh;
+       uint64_t        size;
+       uint64_t        lock_owner;
+       uint64_t        atime;
+       uint64_t        mtime;
+       uint64_t        unused2;
+       uint32_t        atimensec;
+       uint32_t        mtimensec;
+       uint32_t        unused3;
+       uint32_t        mode;
+       uint32_t        unused4;
+       uint32_t        uid;
+       uint32_t        gid;
+       uint32_t        unused5;
 };
 
 struct fuse_open_in {
-       __u32   flags;
-       __u32   unused;
+       uint32_t        flags;
+       uint32_t        unused;
 };
 
 struct fuse_create_in {
-       __u32   flags;
-       __u32   mode;
-       __u32   umask;
-       __u32   padding;
+       uint32_t        flags;
+       uint32_t        mode;
+       uint32_t        umask;
+       uint32_t        padding;
 };
 
 struct fuse_open_out {
-       __u64   fh;
-       __u32   open_flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint32_t        open_flags;
+       uint32_t        padding;
 };
 
 struct fuse_release_in {
-       __u64   fh;
-       __u32   flags;
-       __u32   release_flags;
-       __u64   lock_owner;
+       uint64_t        fh;
+       uint32_t        flags;
+       uint32_t        release_flags;
+       uint64_t        lock_owner;
 };
 
 struct fuse_flush_in {
-       __u64   fh;
-       __u32   unused;
-       __u32   padding;
-       __u64   lock_owner;
+       uint64_t        fh;
+       uint32_t        unused;
+       uint32_t        padding;
+       uint64_t        lock_owner;
 };
 
 struct fuse_read_in {
-       __u64   fh;
-       __u64   offset;
-       __u32   size;
-       __u32   read_flags;
-       __u64   lock_owner;
-       __u32   flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        read_flags;
+       uint64_t        lock_owner;
+       uint32_t        flags;
+       uint32_t        padding;
 };
 
 #define FUSE_COMPAT_WRITE_IN_SIZE 24
 
 struct fuse_write_in {
-       __u64   fh;
-       __u64   offset;
-       __u32   size;
-       __u32   write_flags;
-       __u64   lock_owner;
-       __u32   flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        write_flags;
+       uint64_t        lock_owner;
+       uint32_t        flags;
+       uint32_t        padding;
 };
 
 struct fuse_write_out {
-       __u32   size;
-       __u32   padding;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 #define FUSE_COMPAT_STATFS_SIZE 48
@@ -510,32 +505,32 @@ struct fuse_statfs_out {
 };
 
 struct fuse_fsync_in {
-       __u64   fh;
-       __u32   fsync_flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint32_t        fsync_flags;
+       uint32_t        padding;
 };
 
 struct fuse_setxattr_in {
-       __u32   size;
-       __u32   flags;
+       uint32_t        size;
+       uint32_t        flags;
 };
 
 struct fuse_getxattr_in {
-       __u32   size;
-       __u32   padding;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 struct fuse_getxattr_out {
-       __u32   size;
-       __u32   padding;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 struct fuse_lk_in {
-       __u64   fh;
-       __u64   owner;
+       uint64_t        fh;
+       uint64_t        owner;
        struct fuse_file_lock lk;
-       __u32   lk_flags;
-       __u32   padding;
+       uint32_t        lk_flags;
+       uint32_t        padding;
 };
 
 struct fuse_lk_out {
@@ -543,134 +538,135 @@ struct fuse_lk_out {
 };
 
 struct fuse_access_in {
-       __u32   mask;
-       __u32   padding;
+       uint32_t        mask;
+       uint32_t        padding;
 };
 
 struct fuse_init_in {
-       __u32   major;
-       __u32   minor;
-       __u32   max_readahead;
-       __u32   flags;
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        max_readahead;
+       uint32_t        flags;
 };
 
 struct fuse_init_out {
-       __u32   major;
-       __u32   minor;
-       __u32   max_readahead;
-       __u32   flags;
-       __u16   max_background;
-       __u16   congestion_threshold;
-       __u32   max_write;
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        max_readahead;
+       uint32_t        flags;
+       uint16_t        max_background;
+       uint16_t        congestion_threshold;
+       uint32_t        max_write;
 };
 
 #define CUSE_INIT_INFO_MAX 4096
 
 struct cuse_init_in {
-       __u32   major;
-       __u32   minor;
-       __u32   unused;
-       __u32   flags;
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        unused;
+       uint32_t        flags;
 };
 
 struct cuse_init_out {
-       __u32   major;
-       __u32   minor;
-       __u32   unused;
-       __u32   flags;
-       __u32   max_read;
-       __u32   max_write;
-       __u32   dev_major;              /* chardev major */
-       __u32   dev_minor;              /* chardev minor */
-       __u32   spare[10];
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        unused;
+       uint32_t        flags;
+       uint32_t        max_read;
+       uint32_t        max_write;
+       uint32_t        dev_major;              /* chardev major */
+       uint32_t        dev_minor;              /* chardev minor */
+       uint32_t        spare[10];
 };
 
 struct fuse_interrupt_in {
-       __u64   unique;
+       uint64_t        unique;
 };
 
 struct fuse_bmap_in {
-       __u64   block;
-       __u32   blocksize;
-       __u32   padding;
+       uint64_t        block;
+       uint32_t        blocksize;
+       uint32_t        padding;
 };
 
 struct fuse_bmap_out {