Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 7 Mar 2013 23:57:38 +0000 (15:57 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 7 Mar 2013 23:57:38 +0000 (15:57 -0800)
Pull x86 fixes from Peter Anvin:
 "Several boot fixes (MacBook, legacy EFI bootloaders), another
  please-don't-brick fix, and some minor stuff."

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Do not try to sync identity map for non-mapped pages
  x86, doc: Be explicit about what the x86 struct boot_params requires
  x86: Don't clear efi_info even if the sentinel hits
  x86, mm: Make sure to find a 2M free block for the first mapped area
  x86: Fix 32-bit *_cpu_data initializers
  efivarfs: return accurate error code in efivarfs_fill_super()
  efivars: efivarfs_valid_name() should handle pstore syntax
  efi: be more paranoid about available space when creating variables
  iommu, x86: Add DMA remap fault reason
  x86, smpboot: Remove unused variable

186 files changed:
Documentation/device-mapper/dm-raid.txt
Documentation/hwmon/adm1275
Documentation/hwmon/adt7410
Documentation/hwmon/jc42
Documentation/hwmon/lineage-pem
Documentation/hwmon/lm25066
Documentation/hwmon/ltc2978
Documentation/hwmon/ltc4261
Documentation/hwmon/max16064
Documentation/hwmon/max16065
Documentation/hwmon/max34440
Documentation/hwmon/max8688
Documentation/hwmon/pmbus
Documentation/hwmon/smm665
Documentation/hwmon/ucd9000
Documentation/hwmon/ucd9200
Documentation/hwmon/zl6100
Documentation/power/opp.txt
MAINTAINERS
arch/arm/boot/compressed/Makefile
arch/arm/include/asm/mmu.h
arch/arm/include/asm/mmu_context.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/calls.S
arch/arm/kernel/head.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_tlb.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/suspend.c
arch/arm/lib/memset.S
arch/arm/mach-netx/generic.c
arch/arm/mach-netx/include/mach/irqs.h
arch/arm/mm/context.c
arch/arm/mm/idmap.c
arch/arm/mm/proc-v7-3level.S
arch/powerpc/crypto/sha1-powerpc-asm.S
arch/powerpc/include/asm/bitops.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/platforms/pseries/hvcserver.c
drivers/acpi/glue.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/sleep.c
drivers/ata/libata-acpi.c
drivers/base/power/main.c
drivers/base/power/power.h
drivers/base/power/qos.c
drivers/base/power/sysfs.c
drivers/base/regmap/regmap-irq.c
drivers/bcma/driver_pci_host.c
drivers/char/hw_random/core.c
drivers/connector/cn_proc.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/highbank-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_i2c.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
drivers/gpu/drm/nouveau/nouveau_agp.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/tegra/Kconfig
drivers/hwmon/pmbus/ltc2978.c
drivers/hwmon/sht15.c
drivers/isdn/hisax/st5481_usb.c
drivers/mailbox/pl320-ipc.c
drivers/md/Kconfig
drivers/md/dm-raid.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid10.h
drivers/md/raid5.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/phy/micrel.c
drivers/net/phy/phy_device.c
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/asix_devices.c
drivers/net/usb/ax88179_178a.c [new file with mode: 0644]
drivers/net/usb/cdc_ncm.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/pci/pci-acpi.c
drivers/pnp/pnpacpi/core.c
drivers/regulator/core.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/palmas-regulator.c
drivers/regulator/twl-regulator.c
drivers/scsi/scsi_lib.c
drivers/tty/hvc/hvcs.c
drivers/usb/core/usb-acpi.c
fs/ecryptfs/Kconfig
fs/ecryptfs/Makefile
fs/ecryptfs/crypto.c
fs/ecryptfs/dentry.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/messaging.c
include/acpi/acpi_bus.h
include/drm/drm_crtc.h
include/linux/ecryptfs.h
include/linux/hardirq.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/regulator/driver.h
include/linux/smpboot.h
include/net/tcp.h
kernel/smpboot.c
kernel/softirq.c
kernel/stop_machine.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/ipv4/ip_input.c
net/ipv4/tcp_input.c
net/ipv6/ip6_input.c
net/ipv6/route.c
net/irda/iriap.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/tx.c
net/netfilter/ipset/ip_set_core.c
net/rds/message.c
net/sctp/endpointola.c
net/sctp/socket.c
net/sctp/ssnmap.c
net/sctp/tsnmap.c
net/sctp/ulpqueue.c
net/wireless/nl80211.c

index 56fb62b..b428556 100644 (file)
@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
   raid10        Various RAID10 inspired algorithms chosen by additional params
                - RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
                - RAID1E: Integrated Adjacent Stripe Mirroring
+               - RAID1E: Integrated Offset Stripe Mirroring
                -  and other similar RAID10 variants
 
   Reference: Chapter 4 of
@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
                synchronisation state for each region.
 
         [raid10_copies   <# copies>]
-        [raid10_format   near]
+        [raid10_format   <near|far|offset>]
                These two options are used to alter the default layout of
               a RAID10 configuration.  The number of copies can be
-               specified, but the default is 2.  There are other variations
-               to how the copies are laid down - the default and only current
-               option is "near".  Near copies are what most people think of
-               with respect to mirroring.  If these options are left
-               unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
-               are given, then the layouts for 2, 3 and 4 devices are:
+               specified, but the default is 2.  There are also three
+               variations to how the copies are laid down - the default
+               is "near".  Near copies are what most people think of with
+               respect to mirroring.  If these options are left unspecified,
+               or 'raid10_copies 2' and/or 'raid10_format near' are given,
+               then the layouts for 2, 3 and 4 devices are:
                2 drives         3 drives          4 drives
                --------         ----------        --------------
                A1  A1           A1  A1  A2        A1  A1  A2  A2
@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
                3-device layout is what might be called a 'RAID1E - Integrated
                Adjacent Stripe Mirroring'.
 
+               If 'raid10_copies 2' and 'raid10_format far', then the layouts
+               for 2, 3 and 4 devices are:
+               2 drives             3 drives             4 drives
+               --------             --------------       --------------------
+               A1  A2               A1   A2   A3         A1   A2   A3   A4
+               A3  A4               A4   A5   A6         A5   A6   A7   A8
+               A5  A6               A7   A8   A9         A9   A10  A11  A12
+               ..  ..               ..   ..   ..         ..   ..   ..   ..
+               A2  A1               A3   A1   A2         A2   A1   A4   A3
+               A4  A3               A6   A4   A5         A6   A5   A8   A7
+               A6  A5               A9   A7   A8         A10  A9   A12  A11
+               ..  ..               ..   ..   ..         ..   ..   ..   ..
+
+               If 'raid10_copies 2' and 'raid10_format offset', then the
+               layouts for 2, 3 and 4 devices are:
+               2 drives       3 drives           4 drives
+               --------       ------------       -----------------
+               A1  A2         A1  A2  A3         A1  A2  A3  A4
+               A2  A1         A3  A1  A2         A2  A1  A4  A3
+               A3  A4         A4  A5  A6         A5  A6  A7  A8
+               A4  A3         A6  A4  A5         A6  A5  A8  A7
+               A5  A6         A7  A8  A9         A9  A10 A11 A12
+               A6  A5         A9  A7  A8         A10 A9  A12 A11
+               ..  ..         ..  ..  ..         ..  ..  ..  ..
+               Here we see layouts closely akin to 'RAID1E - Integrated
+               Offset Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
        Each device consists of two entries.  The first is the device
        containing the metadata (if any); the second is the one containing the
@@ -142,3 +170,5 @@ Version History
 1.3.0  Added support for RAID 10
 1.3.1  Allow device replacement/rebuild for RAID 10
 1.3.2   Fix/improve redundancy checking for RAID10
+1.4.0  Non-functional change.  Removes arg from mapping function.
+1.4.1   Add RAID10 "far" and "offset" algorithm support.
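
A minimal usage sketch (not part of the patch, all numbers hypothetical): assuming the target's usual "<start> <length> raid <raid_type> <#raid_params> <raid_params> <#raid_devs> <meta> <dev> ..." table layout, a two-copy "offset" array over four data devices with no metadata devices and a 1MiB (2048-sector) chunk size could be built along these lines:

    # Hypothetical RAID10 "offset" table (length, chunk size and device
    # numbers are illustrative only)
    0 1960893648 raid \
            raid10 5 2048 raid10_copies 2 raid10_format offset \
            4 - 8:17 - 8:33 - 8:49 - 8:65
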
index 2cfa256..15b4a20 100644 (file)
@@ -15,7 +15,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 9600400..58150c4 100644 (file)
@@ -4,9 +4,14 @@ Kernel driver adt7410
 Supported chips:
   * Analog Devices ADT7410
     Prefix: 'adt7410'
-    Addresses scanned: I2C 0x48 - 0x4B
+    Addresses scanned: None
     Datasheet: Publicly available at the Analog Devices website
                http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+  * Analog Devices ADT7420
+    Prefix: 'adt7420'
+    Addresses scanned: None
+    Datasheet: Publicly available at the Analog Devices website
+               http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
 
 Author: Hartmut Knaack <knaack.h@gmx.de>
 
@@ -27,6 +32,10 @@ value per second or even just get one sample on demand for power saving.
 Besides, it can completely power down its ADC, if power management is
 required.
 
+The ADT7420 is register compatible, the only differences being the package,
+a slightly narrower operating temperature range (-40°C to +150°C), and a
+better accuracy (0.25°C instead of 0.50°C.)
+
 Configuration Notes
 -------------------
 
index 1650771..868d74d 100644 (file)
@@ -49,7 +49,7 @@ Supported chips:
     Addresses scanned: I2C 0x18 - 0x1f
 
 Author:
-       Guenter Roeck <guenter.roeck@ericsson.com>
+       Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 2ba5ed1..83b2ddc 100644 (file)
@@ -8,7 +8,7 @@ Supported devices:
     Documentation:
         http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index a21db81..26025e4 100644 (file)
@@ -19,7 +19,7 @@ Supported chips:
     Datasheet:
        http://www.national.com/pf/LM/LM5066.html
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index c365f9b..e4d75c6 100644 (file)
@@ -5,13 +5,13 @@ Supported chips:
   * Linear Technology LTC2978
     Prefix: 'ltc2978'
     Addresses scanned: -
-    Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+    Datasheet: http://www.linear.com/product/ltc2978
   * Linear Technology LTC3880
     Prefix: 'ltc3880'
     Addresses scanned: -
-    Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf
+    Datasheet: http://www.linear.com/product/ltc3880
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index eba2e2c..9378a75 100644 (file)
@@ -8,7 +8,7 @@ Supported chips:
     Datasheet:
         http://cds.linear.com/docs/Datasheet/42612fb.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index f8b4780..d59cc78 100644 (file)
@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index c11f64a..208a29e 100644 (file)
@@ -24,7 +24,7 @@ Supported chips:
        http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
 
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 47651ff..37cbf47 100644 (file)
@@ -27,7 +27,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index fe84987..e780786 100644 (file)
@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 3d3a0f9..cf756ed 100644 (file)
@@ -34,7 +34,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: n.a.
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 59e3161..a341eee 100644 (file)
@@ -29,7 +29,7 @@ Supported chips:
       http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
       http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Module Parameters
index 0df5f27..805e33e 100644 (file)
@@ -11,7 +11,7 @@ Supported chips:
        http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
        http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index fd7d07b..1e8060e 100644 (file)
@@ -15,7 +15,7 @@ Supported chips:
        http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
        http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 3d924b6..756b57c 100644 (file)
@@ -54,7 +54,7 @@ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
 http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
 
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description
index 3035d00..425c51d 100644 (file)
@@ -1,6 +1,5 @@
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================
 
 (C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
 
@@ -16,15 +15,31 @@ Contents
 
 1. Introduction
 ===============
+1.1 What is an Operating Performance Point (OPP)?
+
 Complex SoCs of today consist of multiple sub-modules working in conjunction.
 In an operational system executing varied use cases, not all modules in the SoC
 need to function at their highest performing frequency all the time. To
 facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+voltage/frequency pairs that are higher.
+
+The set of discrete tuples consisting of frequency and voltage pairs that
 the device will support per domain are called Operating Performance Points or
 OPPs.
 
+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
+
+1.2 Operating Performance Points Library
+
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
 is located in include/linux/opp.h. OPP library can be enabled by enabling
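
As a concrete companion to the {Hz, uV} example above, the following is a minimal sketch (not part of the patch) of a driver feeding those three OPPs to the library and querying one back through the helpers declared in include/linux/opp.h; the device pointer and function names are made up, and error handling is trimmed.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/opp.h>
    #include <linux/rcupdate.h>

    /* Register the three example OPPs ({Hz, uV} tuples) for an MPU device. */
    static int mpu_register_opps(struct device *dev)
    {
            int ret;

            ret = opp_add(dev, 300000000, 1000000);         /* 300 MHz @ 1.0 V */
            if (ret)
                    return ret;
            ret = opp_add(dev, 800000000, 1200000);         /* 800 MHz @ 1.2 V */
            if (ret)
                    return ret;
            return opp_add(dev, 1000000000, 1300000);       /* 1 GHz @ 1.3 V */
    }

    /* Pick the lowest OPP at or above a requested frequency and read its voltage. */
    static unsigned long mpu_voltage_for(struct device *dev, unsigned long freq)
    {
            struct opp *opp;
            unsigned long u_volt = 0;

            rcu_read_lock();                        /* OPP lookups are RCU protected */
            opp = opp_find_freq_ceil(dev, &freq);
            if (!IS_ERR(opp))
                    u_volt = opp_get_voltage(opp);
            rcu_read_unlock();

            return u_volt;
    }
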
index e95b1e9..9561658 100644 (file)
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
 
                -----------------------------------
 
-3C505 NETWORK DRIVER
-M:     Philip Blundell <philb@gnu.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/i825xx/3c505*
-
 3C59X NETWORK DRIVER
 M:     Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
 L:     netdev@vger.kernel.org
@@ -2361,12 +2355,6 @@ W:       http://www.arm.linux.org.uk/
 S:     Maintained
 F:     drivers/video/cyber2000fb.*
 
-CYCLADES 2X SYNC CARD DRIVER
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W:     http://oops.ghostprotocols.net:81/blog
-S:     Maintained
-F:     drivers/net/wan/cycx*
-
 CYCLADES ASYNC MUX DRIVER
 W:     http://www.cyclades.com/
 S:     Orphan
@@ -3067,12 +3055,6 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:     drivers/video/s1d13xxxfb.c
 F:     include/video/s1d13xxxfb.h
 
-ETHEREXPRESS-16 NETWORK DRIVER
-M:     Philip Blundell <philb@gnu.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/i825xx/eexpress.*
-
 ETHERNET BRIDGE
 M:     Stephen Hemminger <stephen@networkplumber.org>
 L:     bridge@lists.linux-foundation.org
index 5cad8a6..afed28e 100644 (file)
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
-ccflags-y := -fpic -fno-builtin -I$(obj)
+ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -Wa,-march=all -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
index 9f77e78..e3d5554 100644 (file)
@@ -5,15 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-       u64 id;
+       atomic64_t      id;
 #endif
-       unsigned int vmalloc_seq;
+       unsigned int    vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS      8
 #define ASID_MASK      ((~0ULL) << ASID_BITS)
-#define ASID(mm)       ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm)       ((mm)->context.id.counter & ~ASID_MASK)
 #else
 #define ASID(mm)       (0)
 #endif
@@ -26,7 +26,7 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-       unsigned long           end_brk;
+       unsigned long   end_brk;
 } mm_context_t;
 
 #endif
index e1f644b..863a661 100644 (file)
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)       ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
 
 #else  /* !CONFIG_CPU_HAS_ASID */
 
index 6e924d3..4db8c88 100644 (file)
 #define TLB_V6_D_ASID  (1 << 17)
 #define TLB_V6_I_ASID  (1 << 18)
 
+#define TLB_V6_BP      (1 << 19)
+
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE        (1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE        (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP  (1 << 23)
 
 #define TLB_BARRIER    (1 << 28)
 #define TLB_L2CLEAN_FR (1 << 29)               /* Feroceon */
 #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V6_I_FULL | TLB_V6_D_FULL | \
                         TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
-                        TLB_V6_I_ASID | TLB_V6_D_ASID)
+                        TLB_V6_I_ASID | TLB_V6_D_ASID | \
+                        TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V6
 # define v6wbi_possible_flags  v6wbi_tlb_flags
 #endif
 
 #define v7wbi_tlb_flags_smp    (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-                        TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+                                TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+                                TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up     (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-                        TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+                                TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+                                TLB_V6_U_ASID | TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V7
 
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
        }
 }
 
+static inline void local_flush_bp_all(void)
+{
+       const int zero = 0;
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       if (tlb_flag(TLB_V7_UIS_BP))
+               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+       else if (tlb_flag(TLB_V6_BP))
+               asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+       if (tlb_flag(TLB_BARRIER))
+               isb();
+}
+
 /*
  *     flush_pmd_entry
  *
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
 #define flush_tlb_kernel_page  local_flush_tlb_kernel_page
 #define flush_tlb_range                local_flush_tlb_range
 #define flush_tlb_kernel_range local_flush_tlb_kernel_range
+#define flush_bp_all           local_flush_bp_all
 #else
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 extern void flush_tlb_kernel_page(unsigned long kaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
 #endif
 
 /*
index 4da7cde..af33b44 100644 (file)
 #define __NR_setns                     (__NR_SYSCALL_BASE+375)
 #define __NR_process_vm_readv          (__NR_SYSCALL_BASE+376)
 #define __NR_process_vm_writev         (__NR_SYSCALL_BASE+377)
-                                       /* 378 for kcmp */
+#define __NR_kcmp                      (__NR_SYSCALL_BASE+378)
 #define __NR_finit_module              (__NR_SYSCALL_BASE+379)
 
 /*
index 5ce738b..923eec7 100644 (file)
@@ -110,7 +110,7 @@ int main(void)
   BLANK();
 #endif
 #ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id.counter));
   BLANK();
 #endif
   DEFINE(VMA_VM_MM,            offsetof(struct vm_area_struct, vm_mm));
index 0cc5761..c6ca7e3 100644 (file)
 /* 375 */      CALL(sys_setns)
                CALL(sys_process_vm_readv)
                CALL(sys_process_vm_writev)
-               CALL(sys_ni_syscall)    /* reserved for sys_kcmp */
+               CALL(sys_kcmp)
                CALL(sys_finit_module)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
index 486a15a..e0eb9a1 100644 (file)
@@ -184,13 +184,22 @@ __create_page_tables:
        orr     r3, r3, #3                      @ PGD block type
        mov     r6, #4                          @ PTRS_PER_PGD
        mov     r7, #1 << (55 - 32)             @ L_PGD_SWAPPER
-1:     str     r3, [r0], #4                    @ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4                    @ set top PGD entry bits
+       str     r3, [r0], #4                    @ set bottom PGD entry bits
+#else
+       str     r3, [r0], #4                    @ set bottom PGD entry bits
+       str     r7, [r0], #4                    @ set top PGD entry bits
+#endif
        add     r3, r3, #0x1000                 @ next PMD table
        subs    r6, r6, #1
        bne     1b
 
        add     r4, r4, #0x1000                 @ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       add     r4, r4, #4                      @ we only write the bottom word
+#endif
 #endif
 
        ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
        addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
 
+#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+       sub     r4, r4, #4                      @ Fixup page table pointer
+                                               @ for 64-bit descriptors
+#endif
+
 #ifdef CONFIG_DEBUG_LL
 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
        /*
@@ -276,13 +290,17 @@ __create_page_tables:
        orr     r3, r7, r3, lsl #SECTION_SHIFT
 #ifdef CONFIG_ARM_LPAE
        mov     r7, #1 << (54 - 32)             @ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r7, [r0], #4
+       str     r3, [r0], #4
 #else
-       orr     r3, r3, #PMD_SECT_XN
-#endif
        str     r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
        str     r7, [r0], #4
 #endif
+#else
+       orr     r3, r3, #PMD_SECT_XN
+       str     r3, [r0], #4
+#endif
 
 #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
        /* we don't need any serial debugging mappings */
index 5eae53e..96093b7 100644 (file)
@@ -1023,7 +1023,7 @@ out_mdbgen:
 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
                                      unsigned long action, void *cpu)
 {
-       if (action == CPU_ONLINE)
+       if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
                smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
 
        return NOTIFY_OK;
index 31e0eb3..146157d 100644 (file)
@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
        }
 
        if (event->group_leader != event) {
-               if (validate_group(event) != 0);
+               if (validate_group(event) != 0)
                        return -EINVAL;
        }
 
@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
 };
 
-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
 {
        atomic_set(&armpmu->active_events, 0);
        mutex_init(&armpmu->reserve_mutex);
index 8c79a9e..039cffb 100644 (file)
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define        ARMV7_EVTYPE_MASK       0xc00000ff      /* Mask for writable bits */
+#define        ARMV7_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
 #define        ARMV7_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
 
 /*
index 1bdfd87..31644f1 100644 (file)
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
+       local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
 
index 02c5d2c..bd03005 100644 (file)
@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
 }
 
+static inline void ipi_flush_bp_all(void *ignored)
+{
+       local_flush_bp_all();
+}
+
 void flush_tlb_all(void)
 {
        if (tlb_ops_need_broadcast())
@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                local_flush_tlb_kernel_range(start, end);
 }
 
+void flush_bp_all(void)
+{
+       if (tlb_ops_need_broadcast())
+               on_each_cpu(ipi_flush_bp_all, NULL, 1);
+       else
+               local_flush_bp_all();
+}
index c092115..3f25650 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
+#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
 
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
        struct device_node *np;
        int err;
 
+       if (!is_smp() || !setup_max_cpus)
+               return;
+
        np = of_find_matching_node(NULL, twd_of_match);
        if (!np)
                return;
index 358bca3..c59c97e 100644 (file)
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        ret = __cpu_suspend(arg, fn);
        if (ret == 0) {
                cpu_switch_mm(mm->pgd, mm);
+               local_flush_bp_all();
                local_flush_tlb_all();
        }
 
index 650d592..d912e73 100644 (file)
@@ -19,9 +19,9 @@
 1:     subs    r2, r2, #4              @ 1 do we have enough
        blt     5f                      @ 1 bytes to align with?
        cmp     r3, #2                  @ 1
-       strltb  r1, [r0], #1            @ 1
-       strleb  r1, [r0], #1            @ 1
-       strb    r1, [r0], #1            @ 1
+       strltb  r1, [ip], #1            @ 1
+       strleb  r1, [ip], #1            @ 1
+       strb    r1, [ip], #1            @ 1
        add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
 /*
  * The pointer is now aligned and the length is adjusted.  Try doing the
  */
 
 ENTRY(memset)
-       ands    r3, r0, #3              @ 1 unaligned?
+/*
+ * Preserve the contents of r0 for the return value.
+ */
+       mov     ip, r0
+       ands    r3, ip, #3              @ 1 unaligned?
        bne     1b                      @ 1
 /*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
  */
        orr     r1, r1, r1, lsl #8
        orr     r1, r1, r1, lsl #16
@@ -43,29 +47,28 @@ ENTRY(memset)
 #if ! CALGN(1)+0
 
 /*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
  */
-       str     lr, [sp, #-4]!
-       mov     ip, r1
+       stmfd   sp!, {r8, lr}
+       mov     r8, r1
        mov     lr, r1
 
 2:     subs    r2, r2, #64
-       stmgeia r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
-       stmgeia r0!, {r1, r3, ip, lr}
-       stmgeia r0!, {r1, r3, ip, lr}
-       stmgeia r0!, {r1, r3, ip, lr}
+       stmgeia ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
+       stmgeia ip!, {r1, r3, r8, lr}
+       stmgeia ip!, {r1, r3, r8, lr}
+       stmgeia ip!, {r1, r3, r8, lr}
        bgt     2b
-       ldmeqfd sp!, {pc}               @ Now <64 bytes to go.
+       ldmeqfd sp!, {r8, pc}           @ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
        tst     r2, #32
-       stmneia r0!, {r1, r3, ip, lr}
-       stmneia r0!, {r1, r3, ip, lr}
+       stmneia ip!, {r1, r3, r8, lr}
+       stmneia ip!, {r1, r3, r8, lr}
        tst     r2, #16
-       stmneia r0!, {r1, r3, ip, lr}
-       ldr     lr, [sp], #4
+       stmneia ip!, {r1, r3, r8, lr}
+       ldmfd   sp!, {r8, lr}
 
 #else
 
@@ -74,54 +77,54 @@ ENTRY(memset)
  * whole cache lines at once.
  */
 
-       stmfd   sp!, {r4-r7, lr}
+       stmfd   sp!, {r4-r8, lr}
        mov     r4, r1
        mov     r5, r1
        mov     r6, r1
        mov     r7, r1
-       mov     ip, r1
+       mov     r8, r1
        mov     lr, r1
 
        cmp     r2, #96
-       tstgt   r0, #31
+       tstgt   ip, #31
        ble     3f
 
-       and     ip, r0, #31
-       rsb     ip, ip, #32
-       sub     r2, r2, ip
-       movs    ip, ip, lsl #(32 - 4)
-       stmcsia r0!, {r4, r5, r6, r7}
-       stmmiia r0!, {r4, r5}
-       tst     ip, #(1 << 30)
-       mov     ip, r1
-       strne   r1, [r0], #4
+       and     r8, ip, #31
+       rsb     r8, r8, #32
+       sub     r2, r2, r8
+       movs    r8, r8, lsl #(32 - 4)
+       stmcsia ip!, {r4, r5, r6, r7}
+       stmmiia ip!, {r4, r5}
+       tst     r8, #(1 << 30)
+       mov     r8, r1
+       strne   r1, [ip], #4
 
 3:     subs    r2, r2, #64
-       stmgeia r0!, {r1, r3-r7, ip, lr}
-       stmgeia r0!, {r1, r3-r7, ip, lr}
+       stmgeia ip!, {r1, r3-r8, lr}
+       stmgeia ip!, {r1, r3-r8, lr}
        bgt     3b
-       ldmeqfd sp!, {r4-r7, pc}
+       ldmeqfd sp!, {r4-r8, pc}
 
        tst     r2, #32
-       stmneia r0!, {r1, r3-r7, ip, lr}
+       stmneia ip!, {r1, r3-r8, lr}
        tst     r2, #16
-       stmneia r0!, {r4-r7}
-       ldmfd   sp!, {r4-r7, lr}
+       stmneia ip!, {r4-r7}
+       ldmfd   sp!, {r4-r8, lr}
 
 #endif
 
 4:     tst     r2, #8
-       stmneia r0!, {r1, r3}
+       stmneia ip!, {r1, r3}
        tst     r2, #4
-       strne   r1, [r0], #4
+       strne   r1, [ip], #4
 /*
  * When we get here, we've got less than 4 bytes to zero.  We
  * may have an unaligned pointer as well.
  */
 5:     tst     r2, #2
-       strneb  r1, [r0], #1
-       strneb  r1, [r0], #1
+       strneb  r1, [ip], #1
+       strneb  r1, [ip], #1
        tst     r2, #1
-       strneb  r1, [r0], #1
+       strneb  r1, [ip], #1
        mov     pc, lr
 ENDPROC(memset)
index 27c2cb7..1504b68 100644 (file)
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
 {
        int irq;
 
-       vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
+       vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
 
        for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
                irq_set_chip_and_handler(irq, &netx_hif_chip,
index 6ce914d..8f74a84 100644 (file)
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-#define NETX_IRQ_VIC_START   0
-#define NETX_IRQ_SOFTINT     0
-#define NETX_IRQ_TIMER0      1
-#define NETX_IRQ_TIMER1      2
-#define NETX_IRQ_TIMER2      3
-#define NETX_IRQ_SYSTIME_NS  4
-#define NETX_IRQ_SYSTIME_S   5
-#define NETX_IRQ_GPIO_15     6
-#define NETX_IRQ_WATCHDOG    7
-#define NETX_IRQ_UART0       8
-#define NETX_IRQ_UART1       9
-#define NETX_IRQ_UART2      10
-#define NETX_IRQ_USB        11
-#define NETX_IRQ_SPI        12
-#define NETX_IRQ_I2C        13
-#define NETX_IRQ_LCD        14
-#define NETX_IRQ_HIF        15
-#define NETX_IRQ_GPIO_0_14  16
-#define NETX_IRQ_XPEC0      17
-#define NETX_IRQ_XPEC1      18
-#define NETX_IRQ_XPEC2      19
-#define NETX_IRQ_XPEC3      20
-#define NETX_IRQ_XPEC(no)   (17 + (no))
-#define NETX_IRQ_MSYNC0     21
-#define NETX_IRQ_MSYNC1     22
-#define NETX_IRQ_MSYNC2     23
-#define NETX_IRQ_MSYNC3     24
-#define NETX_IRQ_IRQ_PHY    25
-#define NETX_IRQ_ISO_AREA   26
+#define NETX_IRQ_VIC_START     64
+#define NETX_IRQ_SOFTINT       (NETX_IRQ_VIC_START + 0)
+#define NETX_IRQ_TIMER0                (NETX_IRQ_VIC_START + 1)
+#define NETX_IRQ_TIMER1                (NETX_IRQ_VIC_START + 2)
+#define NETX_IRQ_TIMER2                (NETX_IRQ_VIC_START + 3)
+#define NETX_IRQ_SYSTIME_NS    (NETX_IRQ_VIC_START + 4)
+#define NETX_IRQ_SYSTIME_S     (NETX_IRQ_VIC_START + 5)
+#define NETX_IRQ_GPIO_15       (NETX_IRQ_VIC_START + 6)
+#define NETX_IRQ_WATCHDOG      (NETX_IRQ_VIC_START + 7)
+#define NETX_IRQ_UART0         (NETX_IRQ_VIC_START + 8)
+#define NETX_IRQ_UART1         (NETX_IRQ_VIC_START + 9)
+#define NETX_IRQ_UART2         (NETX_IRQ_VIC_START + 10)
+#define NETX_IRQ_USB           (NETX_IRQ_VIC_START + 11)
+#define NETX_IRQ_SPI           (NETX_IRQ_VIC_START + 12)
+#define NETX_IRQ_I2C           (NETX_IRQ_VIC_START + 13)
+#define NETX_IRQ_LCD           (NETX_IRQ_VIC_START + 14)
+#define NETX_IRQ_HIF           (NETX_IRQ_VIC_START + 15)
+#define NETX_IRQ_GPIO_0_14     (NETX_IRQ_VIC_START + 16)
+#define NETX_IRQ_XPEC0         (NETX_IRQ_VIC_START + 17)
+#define NETX_IRQ_XPEC1         (NETX_IRQ_VIC_START + 18)
+#define NETX_IRQ_XPEC2         (NETX_IRQ_VIC_START + 19)
+#define NETX_IRQ_XPEC3         (NETX_IRQ_VIC_START + 20)
+#define NETX_IRQ_XPEC(no)      (NETX_IRQ_VIC_START + 17 + (no))
+#define NETX_IRQ_MSYNC0                (NETX_IRQ_VIC_START + 21)
+#define NETX_IRQ_MSYNC1                (NETX_IRQ_VIC_START + 22)
+#define NETX_IRQ_MSYNC2                (NETX_IRQ_VIC_START + 23)
+#define NETX_IRQ_MSYNC3                (NETX_IRQ_VIC_START + 24)
+#define NETX_IRQ_IRQ_PHY       (NETX_IRQ_VIC_START + 25)
+#define NETX_IRQ_ISO_AREA      (NETX_IRQ_VIC_START + 26)
 /* int 27 is reserved */
 /* int 28 is reserved */
-#define NETX_IRQ_TIMER3     29
-#define NETX_IRQ_TIMER4     30
+#define NETX_IRQ_TIMER3                (NETX_IRQ_VIC_START + 29)
+#define NETX_IRQ_TIMER4                (NETX_IRQ_VIC_START + 30)
 /* int 31 is reserved */
 
-#define NETX_IRQS 32
+#define NETX_IRQS              (NETX_IRQ_VIC_START + 32)
 
 /* for multiplexed irqs on gpio 0..14 */
 #define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
index 7a05111..a5a4b2b 100644 (file)
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
        return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-       u64 asid = mm->context.id;
+       u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);
 
        if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
                cpumask_clear(mm_cpumask(mm));
        }
 
-       mm->context.id = asid;
+       return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
+       u64 asid;
 
        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                __check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
         */
        cpu_set_reserved_ttbr0();
 
-       if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-           && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+       asid = atomic64_read(&mm->context.id);
+       if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+           && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;
 
        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
-       if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-               new_context(mm, cpu);
-
-       atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-       cpumask_set_cpu(cpu, mm_cpumask(mm));
+       asid = atomic64_read(&mm->context.id);
+       if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+               asid = new_context(mm, cpu);
+               atomic64_set(&mm->context.id, asid);
+       }
 
-       if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+       if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+               local_flush_bp_all();
                local_flush_tlb_all();
+       }
+
+       atomic64_set(&per_cpu(active_asids, cpu), asid);
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
index 2dffc01..5ee505c 100644 (file)
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
 {
        /* Switch to the identity mapping. */
        cpu_switch_mm(idmap_pgd, &init_mm);
+       local_flush_bp_all();
 
 #ifdef CONFIG_CPU_HAS_ASID
        /*
index 50bf1da..6ffd78c 100644 (file)
@@ -48,7 +48,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
        mmid    r1, r1                          @ get mm->context.id
-       and     r3, r1, #0xff
+       asid    r3, r1
        mov     r3, r3, lsl #(48 - 32)          @ ASID
        mcrr    p15, 0, r0, r3, c2              @ set TTB 0
        isb
index a5f8264..125e165 100644 (file)
        STEPUP4((t)+16, fn)
 
 _GLOBAL(powerpc_sha_transform)
-       PPC_STLU r1,-STACKFRAMESIZE(r1)
+       PPC_STLU r1,-INT_FRAME_SIZE(r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
 
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
 
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
-       addi    r1,r1,STACKFRAMESIZE
+       addi    r1,r1,INT_FRAME_SIZE
        blr
index ef918a2..08bd299 100644 (file)
@@ -52,8 +52,6 @@
 #define smp_mb__before_clear_bit()     smp_mb()
 #define smp_mb__after_clear_bit()      smp_mb()
 
-#define BITOP_LE_SWIZZLE       ((BITS_PER_LONG-1) & ~0x7)
-
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix, postfix)  \
 static __inline__ void fn(unsigned long mask,  \
index e665861..c9c67fc 100644 (file)
 #define SPRN_HSRR0     0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1     0x13B   /* Hypervisor Save/Restore 1 */
 #define SPRN_FSCR      0x099   /* Facility Status & Control Register */
-#define FSCR_TAR       (1<<8)  /* Enable Target Adress Register */
+#define   FSCR_TAR     (1 << (63-55)) /* Enable Target Address Register */
+#define   FSCR_DSCR    (1 << (63-61)) /* Enable Data Stream Control Register */
 #define SPRN_TAR       0x32f   /* Target Address Register */
 #define SPRN_LPCR      0x13E   /* LPAR Control Register */
 #define   LPCR_VPM0    (1ul << (63-0))
index 535b6d8..ebbec52 100644 (file)
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
 COMPAT_SYS(process_vm_readv)
 COMPAT_SYS(process_vm_writev)
 SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
index f25b5c4..1487f0f 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          354
+#define __NR_syscalls          355
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 8c478c6..74cb4d7 100644 (file)
 #define __NR_process_vm_readv  351
 #define __NR_process_vm_writev 352
 #define __NR_finit_module      353
+#define __NR_kcmp              354
 
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index d29facb..ea847ab 100644 (file)
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
 
 _GLOBAL(__setup_cpu_power8)
        mflr    r11
+       bl      __init_FSCR
        bl      __init_hvmode_206
        mtlr    r11
        beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
        mfspr   r3,SPRN_LPCR
        oris    r3, r3, LPCR_AIL_3@h
        bl      __init_LPCR
-       bl      __init_FSCR
        bl      __init_TLB
        mtlr    r11
        blr
 
 _GLOBAL(__restore_cpu_power8)
        mflr    r11
+       bl      __init_FSCR
        mfmsr   r3
        rldicl. r0,r3,4,63
        beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
 
 __init_FSCR:
        mfspr   r3,SPRN_FSCR
-       ori     r3,r3,FSCR_TAR
+       ori     r3,r3,FSCR_TAR|FSCR_DSCR
        mtspr   SPRN_FSCR,r3
        blr
 
index a8a5361..87ef8f5 100644 (file)
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                              \
        mflr    r10 ;                                           \
        ld      r12,PACAKBASE(r13) ;                            \
        LOAD_HANDLER(r12, system_call_entry_direct) ;           \
-       mtlr    r12 ;                                           \
+       mtctr   r12 ;                                           \
        mfspr   r12,SPRN_SRR1 ;                                 \
        /* Re-use of r13... No spare regs to do this */ \
        li      r13,MSR_RI ;                                    \
        mtmsrd  r13,1 ;                                         \
        GET_PACA(r13) ; /* get r13 back */                      \
-       blr ;
+       bctr ;
 #else
        /* We can branch directly */
 #define SYSCALL_PSERIES_2_DIRECT                               \
index fcf4b4c..4557e91 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
 #include <asm/hvcall.h>
 #include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
                        = (unsigned int)last_p_partition_ID;
 
                /* copy the Null-term char too */
-               strncpy(&next_partner_info->location_code[0],
+               strlcpy(&next_partner_info->location_code[0],
                        (char *)&pi_buff[2],
-                       strlen((char *)&pi_buff[2]) + 1);
+                       sizeof(next_partner_info->location_code));
 
                list_add_tail(&(next_partner_info->node), head);
                next_partner_info = NULL;
index ef6f155..40a84cc 100644 (file)
@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
 {
        if (acpi_disabled)
                return -ENODEV;
-       if (type && type->bus && type->find_device) {
+       if (type && type->match && type->find_device) {
                down_write(&bus_type_sem);
                list_add_tail(&type->list, &bus_type_list);
                up_write(&bus_type_sem);
-               printk(KERN_INFO PREFIX "bus type %s registered\n",
-                      type->bus->name);
+               printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
                return 0;
        }
        return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
                down_write(&bus_type_sem);
                list_del_init(&type->list);
                up_write(&bus_type_sem);
-               printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
-                      type->bus->name);
+               printk(KERN_INFO PREFIX "bus type %s unregistered\n",
+                      type->name);
                return 0;
        }
        return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
 
-static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
+static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 {
        struct acpi_bus_type *tmp, *ret = NULL;
 
-       if (!type)
-               return NULL;
-
        down_read(&bus_type_sem);
        list_for_each_entry(tmp, &bus_type_list, list) {
-               if (tmp->bus == type) {
+               if (tmp->match(dev)) {
                        ret = tmp;
                        break;
                }
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
        return ret;
 }
 
-static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
-{
-       struct acpi_bus_type *tmp;
-       int ret = -ENODEV;
-
-       down_read(&bus_type_sem);
-       list_for_each_entry(tmp, &bus_type_list, list) {
-               if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
-                       ret = 0;
-                       break;
-               }
-       }
-       up_read(&bus_type_sem);
-       return ret;
-}
-
 static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
                                      void *addr_p, void **ret_p)
 {
@@ -261,29 +241,12 @@ err:
 
 static int acpi_platform_notify(struct device *dev)
 {
-       struct acpi_bus_type *type;
+       struct acpi_bus_type *type = acpi_get_bus_type(dev);
        acpi_handle handle;
        int ret;
 
        ret = acpi_bind_one(dev, NULL);
-       if (ret && (!dev->bus || !dev->parent)) {
-               /* bridge devices genernally haven't bus or parent */
-               ret = acpi_find_bridge_device(dev, &handle);
-               if (!ret) {
-                       ret = acpi_bind_one(dev, handle);
-                       if (ret)
-                               goto out;
-               }
-       }
-
-       type = acpi_get_bus_type(dev->bus);
-       if (ret) {
-               if (!type || !type->find_device) {
-                       DBG("No ACPI bus support for %s\n", dev_name(dev));
-                       ret = -EINVAL;
-                       goto out;
-               }
-
+       if (ret && type) {
                ret = type->find_device(dev, &handle);
                if (ret) {
                        DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
 {
        struct acpi_bus_type *type;
 
-       type = acpi_get_bus_type(dev->bus);
+       type = acpi_get_bus_type(dev);
        if (type && type->cleanup)
                type->cleanup(dev);
 
index eff7222..164d495 100644 (file)
@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
        }
 
 exit:
-       if (buffer.pointer)
-               kfree(buffer.pointer);
+       kfree(buffer.pointer);
        return apic_id;
 }
 
index df34bd0..bec717f 100644 (file)
@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
                return 0;
 #endif
 
-       BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+       BUG_ON(pr->id >= nr_cpu_ids);
 
        /*
         * Buggy BIOS check
index 6d3a06a..2421303 100644 (file)
@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
                status = acpi_get_sleep_type_data(i, &type_a, &type_b);
                if (ACPI_SUCCESS(status)) {
                        sleep_states[i] = 1;
-                       pr_cont(" S%d", i);
                }
        }
 
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
        hibernation_set_ops(old_suspend_ordering ?
                        &acpi_hibernation_ops_old : &acpi_hibernation_ops);
        sleep_states[ACPI_STATE_S4] = 1;
-       pr_cont(KERN_CONT " S4");
        if (nosigcheck)
                return;
 
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
 {
        acpi_status status;
        u8 type_a, type_b;
+       char supported[ACPI_S_STATE_COUNT * 3 + 1];
+       char *pos = supported;
+       int i;
 
        if (acpi_disabled)
                return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
        acpi_sleep_dmi_check();
 
        sleep_states[ACPI_STATE_S0] = 1;
-       pr_info(PREFIX "(supports S0");
 
        acpi_sleep_suspend_setup();
        acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
        status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
        if (ACPI_SUCCESS(status)) {
                sleep_states[ACPI_STATE_S5] = 1;
-               pr_cont(" S5");
                pm_power_off_prepare = acpi_power_off_prepare;
                pm_power_off = acpi_power_off;
        }
-       pr_cont(")\n");
+
+       supported[0] = 0;
+       for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
+               if (sleep_states[i])
+                       pos += sprintf(pos, " S%d", i);
+       }
+       pr_info(PREFIX "(supports%s)\n", supported);
+
        /*
         * Register the tts_notifier to reboot notifier list so that the _TTS
         * object can also be evaluated when the system enters S5.
index 0ea1018..beea311 100644 (file)
@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
                return -ENODEV;
 }
 
-static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
-{
-       return -ENODEV;
-}
-
 static struct acpi_bus_type ata_acpi_bus = {
-       .find_bridge = ata_acpi_find_dummy,
+       .name = "ATA",
        .find_device = ata_acpi_find_device,
 };
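
For glue code outside this patch, a registration with the reworked struct acpi_bus_type would look roughly like the sketch below; the "foo" bus, its callbacks and the bool return type of .match are illustrative assumptions rather than code taken from this series.

    #include <linux/acpi.h>
    #include <linux/device.h>

    /* Hypothetical bus glue: a name and a match() callback replace the old
     * struct bus_type pointer and find_bridge() hook. */
    static bool foo_acpi_bus_match(struct device *dev)
    {
            return dev->bus == &foo_bus_type;       /* foo_bus_type is hypothetical */
    }

    static int foo_acpi_find_device(struct device *dev, acpi_handle *handle)
    {
            /* map dev to its ACPI handle here, e.g. via an _ADR/_HID lookup */
            return -ENODEV;
    }

    static struct acpi_bus_type foo_acpi_bus = {
            .name           = "foo",
            .match          = foo_acpi_bus_match,
            .find_device    = foo_acpi_find_device,
    };

    /* Registered and removed with register_acpi_bus_type(&foo_acpi_bus) and
     * unregister_acpi_bus_type(&foo_acpi_bus), as shown in glue.c above. */
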
 
index 2b7f77d..15beb50 100644 (file)
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
-       dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
 }
 
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
-       dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
index b16686a..cfc3226 100644 (file)
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
        if (!dev->power.early_init) {
                spin_lock_init(&dev->power.lock);
-               dev->power.power_state = PMSG_INVALID;
+               dev->power.qos = NULL;
                dev->power.early_init = true;
        }
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_sleep_init(struct device *dev) {}
 
-static inline void device_pm_add(struct device *dev)
-{
-       dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
 
 static inline void device_pm_remove(struct device *dev)
 {
-       dev_pm_qos_constraints_destroy(dev);
        pm_runtime_remove(dev);
 }
 
index 3d4d1f8..5f74587 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
        struct pm_qos_flags *pqf;
        s32 val;
 
-       if (!qos)
+       if (IS_ERR_OR_NULL(qos))
                return PM_QOS_FLAGS_UNDEFINED;
 
        pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-       return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+       return IS_ERR_OR_NULL(dev->power.qos) ?
+               0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-       mutex_lock(&dev_pm_qos_mtx);
-       dev->power.qos = NULL;
-       dev->power.power_state = PMSG_ON;
-       mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        struct pm_qos_constraints *c;
        struct pm_qos_flags *f;
 
+       mutex_lock(&dev_pm_qos_mtx);
+
        /*
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
-       dev_pm_qos_hide_latency_limit(dev);
-       dev_pm_qos_hide_flags(dev);
+       __dev_pm_qos_hide_latency_limit(dev);
+       __dev_pm_qos_hide_flags(dev);
 
-       mutex_lock(&dev_pm_qos_mtx);
-
-       dev->power.power_state = PMSG_INVALID;
        qos = dev->power.qos;
        if (!qos)
                goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        }
 
        spin_lock_irq(&dev->power.lock);
-       dev->power.qos = NULL;
+       dev->power.qos = ERR_PTR(-ENODEV);
        spin_unlock_irq(&dev->power.lock);
 
        kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                 "%s() called for already added request\n", __func__))
                return -EINVAL;
 
-       req->dev = dev;
-
        mutex_lock(&dev_pm_qos_mtx);
 
-       if (!dev->power.qos) {
-               if (dev->power.power_state.event == PM_EVENT_INVALID) {
-                       /* The device has been removed from the system. */
-                       req->dev = NULL;
-                       ret = -ENODEV;
-                       goto out;
-               } else {
-                       /*
-                        * Allocate the constraints data on the first call to
-                        * add_request, i.e. only if the data is not already
-                        * allocated and if the device has not been removed.
-                        */
-                       ret = dev_pm_qos_constraints_allocate(dev);
-               }
-       }
+       if (IS_ERR(dev->power.qos))
+               ret = -ENODEV;
+       else if (!dev->power.qos)
+               ret = dev_pm_qos_constraints_allocate(dev);
 
        if (!ret) {
+               req->dev = dev;
                req->type = type;
                ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
        }
 
- out:
        mutex_unlock(&dev_pm_qos_mtx);
 
        return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
        s32 curr_value;
        int ret = 0;
 
-       if (!req->dev->power.qos)
+       if (!req) /*guard against callers passing in null */
+               return -EINVAL;
+
+       if (WARN(!dev_pm_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;
 
        switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
        int ret;
 
+       mutex_lock(&dev_pm_qos_mtx);
+       ret = __dev_pm_qos_update_request(req, new_value);
+       mutex_unlock(&dev_pm_qos_mtx);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+       int ret;
+
        if (!req) /*guard against callers passing in null */
                return -EINVAL;
 
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;
 
-       mutex_lock(&dev_pm_qos_mtx);
-       ret = __dev_pm_qos_update_request(req, new_value);
-       mutex_unlock(&dev_pm_qos_mtx);
+       if (IS_ERR_OR_NULL(req->dev->power.qos))
+               return -ENODEV;
 
+       ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+       memset(req, 0, sizeof(*req));
        return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-       int ret = 0;
-
-       if (!req) /*guard against callers passing in null */
-               return -EINVAL;
-
-       if (WARN(!dev_pm_qos_request_active(req),
-                "%s() called for unknown object\n", __func__))
-               return -EINVAL;
+       int ret;
 
        mutex_lock(&dev_pm_qos_mtx);
-
-       if (req->dev->power.qos) {
-               ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-                                      PM_QOS_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       } else {
-               /* Return if the device has been removed */
-               ret = -ENODEV;
-       }
-
+       ret = __dev_pm_qos_remove_request(req);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
 }
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
        mutex_lock(&dev_pm_qos_mtx);
 
-       if (!dev->power.qos)
-               ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-                       dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+       if (IS_ERR(dev->power.qos))
+               ret = -ENODEV;
+       else if (!dev->power.qos)
+               ret = dev_pm_qos_constraints_allocate(dev);
 
        if (!ret)
                ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
        mutex_lock(&dev_pm_qos_mtx);
 
        /* Silently return if the constraints object is not present. */
-       if (dev->power.qos)
+       if (!IS_ERR_OR_NULL(dev->power.qos))
                retval = blocking_notifier_chain_unregister(
                                dev->power.qos->latency.notifiers,
                                notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
                                           enum dev_pm_qos_req_type type)
 {
+       struct dev_pm_qos_request *req = NULL;
+
        switch(type) {
        case DEV_PM_QOS_LATENCY:
-               dev_pm_qos_remove_request(dev->power.qos->latency_req);
+               req = dev->power.qos->latency_req;
                dev->power.qos->latency_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
-               dev_pm_qos_remove_request(dev->power.qos->flags_req);
+               req = dev->power.qos->flags_req;
                dev->power.qos->flags_req = NULL;
                break;
        }
+       __dev_pm_qos_remove_request(req);
+       kfree(req);
 }
 
 /**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
        if (!device_is_registered(dev) || value < 0)
                return -EINVAL;
 
-       if (dev->power.qos && dev->power.qos->latency_req)
-               return -EEXIST;
-
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
 
        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(req);
                return ret;
+       }
+
+       mutex_lock(&dev_pm_qos_mtx);
+
+       if (IS_ERR_OR_NULL(dev->power.qos))
+               ret = -ENODEV;
+       else if (dev->power.qos->latency_req)
+               ret = -EEXIST;
+
+       if (ret < 0) {
+               __dev_pm_qos_remove_request(req);
+               kfree(req);
+               goto out;
+       }
 
        dev->power.qos->latency_req = req;
        ret = pm_qos_sysfs_add_latency(dev);
        if (ret)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+       mutex_unlock(&dev_pm_qos_mtx);
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+       if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+               pm_qos_sysfs_remove_latency(dev);
+               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+       }
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-       if (dev->power.qos && dev->power.qos->latency_req) {
-               pm_qos_sysfs_remove_latency(dev);
-               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-       }
+       mutex_lock(&dev_pm_qos_mtx);
+       __dev_pm_qos_hide_latency_limit(dev);
+       mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
        if (!device_is_registered(dev))
                return -EINVAL;
 
-       if (dev->power.qos && dev->power.qos->flags_req)
-               return -EEXIST;
-
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
 
-       pm_runtime_get_sync(dev);
        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-       if (ret < 0)
-               goto fail;
+       if (ret < 0) {
+               kfree(req);
+               return ret;
+       }
+
+       pm_runtime_get_sync(dev);
+       mutex_lock(&dev_pm_qos_mtx);
+
+       if (IS_ERR_OR_NULL(dev->power.qos))
+               ret = -ENODEV;
+       else if (dev->power.qos->flags_req)
+               ret = -EEXIST;
+
+       if (ret < 0) {
+               __dev_pm_qos_remove_request(req);
+               kfree(req);
+               goto out;
+       }
 
        dev->power.qos->flags_req = req;
        ret = pm_qos_sysfs_add_flags(dev);
        if (ret)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+       mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+       if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+               pm_qos_sysfs_remove_flags(dev);
+               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+       }
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
  */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-       if (dev->power.qos && dev->power.qos->flags_req) {
-               pm_qos_sysfs_remove_flags(dev);
-               pm_runtime_get_sync(dev);
-               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-               pm_runtime_put(dev);
-       }
+       pm_runtime_get_sync(dev);
+       mutex_lock(&dev_pm_qos_mtx);
+       __dev_pm_qos_hide_flags(dev);
+       mutex_unlock(&dev_pm_qos_mtx);
+       pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
        s32 value;
        int ret;
 
-       if (!dev->power.qos || !dev->power.qos->flags_req)
-               return -EINVAL;
-
        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);
 
+       if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        value = dev_pm_qos_requested_flags(dev);
        if (set)
                value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
        ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
-
        return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
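
The dev_pm_qos rework above drops the PMSG_ON/PMSG_INVALID bookkeeping and instead marks a destroyed constraints object with ERR_PTR(-ENODEV), so later callers get -ENODEV instead of racing with a freed pointer. A minimal user-space sketch of that sentinel idiom follows; ERR_PTR()/IS_ERR()/IS_ERR_OR_NULL() here are simplified stand-ins for the <linux/err.h> helpers, and the struct and function names are illustrative only, not the driver-core API.

/*
 * Sketch of the sentinel-pointer idiom used in qos.c above: NULL means "no
 * constraints allocated yet", an encoded error pointer means "device gone".
 * The helpers below are simplified stand-ins for <linux/err.h>.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

struct constraints { int latency; };

static struct constraints *qos;	/* NULL: none yet, ERR_PTR(-ENODEV): destroyed */

static int add_request(int value)
{
	if (IS_ERR(qos))		/* device already removed */
		return -ENODEV;
	if (!qos) {			/* first request allocates lazily */
		qos = calloc(1, sizeof(*qos));
		if (!qos)
			return -ENOMEM;
	}
	qos->latency = value;
	return 0;
}

static void constraints_destroy(void)
{
	if (!IS_ERR_OR_NULL(qos))
		free(qos);
	qos = ERR_PTR(-ENODEV);	/* later calls fail cleanly instead of crashing */
}
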
index 50d16e3..a53ebd2 100644 (file)
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+       dev_pm_qos_constraints_destroy(dev);
        rpm_sysfs_remove(dev);
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
index 4706c63..020ea2b 100644 (file)
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
+                       pm_runtime_put(map->dev);
                        return IRQ_NONE;
                }
        }
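
The one-line regmap-irq fix above reflects a runtime-PM subtlety: pm_runtime_get_sync() raises the device usage count even when the resume it triggers fails, so the error path still owes a pm_runtime_put(). A compile-only sketch of that contract with stand-in counters (not the real runtime-PM implementation):

/*
 * Stand-in model of the get_sync()/put() balance the fix above restores: the
 * usage count goes up before the resume is attempted, so a failed resume
 * still has to be followed by a put.  Illustrative only.
 */
#include <errno.h>

static int usage_count;
static int resume_works;	/* toggle to exercise the failure path */

static int fake_get_sync(void)
{
	usage_count++;				/* counted before resume is tried */
	return resume_works ? 0 : -EIO;		/* failure does not undo the count */
}

static void fake_put(void)
{
	usage_count--;
}

static int irq_thread(void)
{
	int ret = fake_get_sync();

	if (ret < 0) {
		fake_put();	/* the put the patch adds: keeps the count balanced */
		return ret;
	}

	/* ... handle the interrupt ... */

	fake_put();
	return 0;
}
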
index d3bde6c..30629a3 100644 (file)
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
                return;
        }
 
+       spin_lock_init(&pc_host->cfgspace_lock);
+
        pc->host_controller = pc_host;
        pc_host->pci_controller.io_resource = &pc_host->io_resource;
        pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
index 1bafb40..69ae597 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 
@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
 static LIST_HEAD(rng_list);
 static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
-       __cacheline_aligned;
+static u8 *rng_buffer;
+
+static size_t rng_buffer_size(void)
+{
+       return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+}
 
 static inline int hwrng_init(struct hwrng *rng)
 {
@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 
                if (!data_avail) {
                        bytes_read = rng_get_data(current_rng, rng_buffer,
-                               sizeof(rng_buffer),
+                               rng_buffer_size(),
                                !(filp->f_flags & O_NONBLOCK));
                        if (bytes_read < 0) {
                                err = bytes_read;
@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
 
        mutex_lock(&rng_mutex);
 
+       /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+       err = -ENOMEM;
+       if (!rng_buffer) {
+               rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+               if (!rng_buffer)
+                       goto out_unlock;
+       }
+
        /* Must not register two RNGs with the same name. */
        err = -EEXIST;
        list_for_each_entry(tmp, &rng_list, list) {
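
The hw_random change above turns the static, cacheline-aligned rng_buffer into memory kmalloc'ed on the first hwrng_register(), since kmalloc'ed memory is what virtio_rng's virt_to_page() use expects; the allocation happens once, under the same rng_mutex that already serializes registration. A small user-space sketch of that allocate-once pattern, with pthreads standing in for the kernel mutex:

/*
 * Allocate-once-under-the-registration-lock pattern from hwrng_register().
 * Names mirror the driver; pthreads stands in for the kernel mutex API and
 * the 32-byte floor mirrors the SMP_CACHE_BYTES < 32 ? 32 : ... expression.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t rng_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *rng_buffer;

static size_t rng_buffer_size(void)
{
	return 32;	/* stand-in for max(SMP_CACHE_BYTES, 32) */
}

static int rng_register(void)
{
	int err = 0;

	pthread_mutex_lock(&rng_mutex);
	if (!rng_buffer) {			/* first registration allocates */
		rng_buffer = malloc(rng_buffer_size());
		if (!rng_buffer)
			err = -ENOMEM;
	}
	/* ... duplicate-name check and list insertion would follow ... */
	pthread_mutex_unlock(&rng_mutex);
	return err;
}
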
index fce2000..1110478 100644 (file)
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
            (task_active_pid_ns(current) != &init_pid_ns))
                return;
 
+       /* Can only change if privileged. */
+       if (!capable(CAP_NET_ADMIN)) {
+               err = EPERM;
+               goto out;
+       }
+
        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                err = EINVAL;
                break;
        }
+
+out:
        cn_proc_ack(err, msg->seq, msg->ack);
 }
 
index d2ac911..46bde01 100644 (file)
@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu)                              \
  * dbs: used as a short form for demand based switching. It helps to keep variable
  *     names smaller, simpler
  * cdbs: common dbs
- * on_*: On-demand governor
+ * od_*: On-demand governor
  * cs_*: Conservative governor
  */
 
index 66e3a71..b61b5a3 100644 (file)
 
 static int hb_voltage_change(unsigned int freq)
 {
-       int i;
-       u32 msg[HB_CPUFREQ_IPC_LEN];
-
-       msg[0] = HB_CPUFREQ_CHANGE_NOTE;
-       msg[1] = freq / 1000000;
-       for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
-               msg[i] = 0;
+       u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
 
        return pl320_ipc_transmit(msg);
 }
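
The highbank rewrite above leans on C's guarantee that a braced initializer zero-fills any elements it does not name, which is what made the old msg[i] = 0 loop redundant. A standalone check of that rule; HB_CPUFREQ_IPC_LEN and the message values are assumed here purely for illustration:

/*
 * Standalone check of the partial-initializer rule (C99 6.7.8p21) that the
 * hb_voltage_change() rewrite depends on: unnamed elements are zeroed.
 */
#include <assert.h>

#define HB_CPUFREQ_IPC_LEN 7	/* assumed length, for the demo only */

int main(void)
{
	unsigned int msg[HB_CPUFREQ_IPC_LEN] = { 0x27, 1200 };
	int i;

	assert(msg[0] == 0x27);
	assert(msg[1] == 1200);
	for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
		assert(msg[i] == 0);	/* the removed loop did exactly this */
	return 0;
}
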
index 096fde0..f6dd1e7 100644 (file)
@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
+       if (!policy->cpuinfo.max_freq)
+               return -ENODEV;
+
        intel_pstate_get_min_max(cpu, &min, &max);
 
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
        .owner          = THIS_MODULE,
 };
 
-static void intel_pstate_exit(void)
-{
-       int cpu;
-
-       sysfs_remove_group(intel_pstate_kobject,
-                               &intel_pstate_attr_group);
-       debugfs_remove_recursive(debugfs_parent);
-
-       cpufreq_unregister_driver(&intel_pstate_driver);
-
-       if (!all_cpu_data)
-               return;
-
-       get_online_cpus();
-       for_each_online_cpu(cpu) {
-               if (all_cpu_data[cpu]) {
-                       del_timer_sync(&all_cpu_data[cpu]->timer);
-                       kfree(all_cpu_data[cpu]);
-               }
-       }
-
-       put_online_cpus();
-       vfree(all_cpu_data);
-}
-module_exit(intel_pstate_exit);
-
 static int __initdata no_load;
 
 static int __init intel_pstate_init(void)
 {
-       int rc = 0;
+       int cpu, rc = 0;
        const struct x86_cpu_id *id;
 
        if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
        intel_pstate_sysfs_expose_params();
        return rc;
 out:
-       intel_pstate_exit();
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (all_cpu_data[cpu]) {
+                       del_timer_sync(&all_cpu_data[cpu]->timer);
+                       kfree(all_cpu_data[cpu]);
+               }
+       }
+
+       put_online_cpus();
+       vfree(all_cpu_data);
        return -ENODEV;
 }
 device_initcall(intel_pstate_init);
index 6f2306d..f9dbd50 100644 (file)
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
        return data & (1 << bit) ? 1 : 0;
 }
 
-static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
+static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
 {
-       return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
+       return ichx_priv.use_gpio & (1 << (nr / 32));
 }
 
 static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
index fff9786..c2534d6 100644 (file)
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
 static void gpiod_free(struct gpio_desc *desc);
 static int gpiod_direction_input(struct gpio_desc *desc);
 static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_get_direction(const struct gpio_desc *desc);
 static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
 static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-static int gpiod_get_value(struct gpio_desc *desc);
+static int gpiod_get_value(const struct gpio_desc *desc);
 static void gpiod_set_value(struct gpio_desc *desc, int value);
-static int gpiod_cansleep(struct gpio_desc *desc);
-static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_cansleep(const struct gpio_desc *desc);
+static int gpiod_to_irq(const struct gpio_desc *desc);
 static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
 static int gpiod_export_link(struct device *dev, const char *name,
                             struct gpio_desc *desc);
@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
        return 0;
 }
 
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
 {
-       return desc->chip;
+       return desc ? desc->chip : NULL;
 }
 
+/* caller holds gpio_lock *OR* gpio is marked as requested */
 struct gpio_chip *gpio_to_chip(unsigned gpio)
 {
        return gpiod_to_chip(gpio_to_desc(gpio));
@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
 }
 
 /* caller ensures gpio is valid and requested, chip->get_direction may sleep  */
-static int gpiod_get_direction(struct gpio_desc *desc)
+static int gpiod_get_direction(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        unsigned                offset;
@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
        if (status > 0) {
                /* GPIOF_DIR_IN, or other positive */
                status = 1;
-               clear_bit(FLAG_IS_OUT, &desc->flags);
+               /* FLAG_IS_OUT is just a cache of the result of get_direction(),
+                * so it does not affect constness per se */
+               clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
        }
        if (status == 0) {
                /* GPIOF_DIR_OUT */
-               set_bit(FLAG_IS_OUT, &desc->flags);
+               set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
        }
        return status;
 }
@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
 static ssize_t gpio_direction_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct gpio_desc        *desc = dev_get_drvdata(dev);
+       const struct gpio_desc  *desc = dev_get_drvdata(dev);
        ssize_t                 status;
 
        mutex_lock(&sysfs_lock);
@@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
                goto done;
 
        desc = gpio_to_desc(gpio);
+       /* reject invalid GPIOs */
+       if (!desc) {
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done on behalf of userspace, so
@@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
        if (status < 0)
                goto done;
 
-       status = -EINVAL;
-
        desc = gpio_to_desc(gpio);
        /* reject bogus commands (gpio_unexport ignores them) */
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
+
+       status = -EINVAL;
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done on behalf of userspace, so
@@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
 {
        int                     status = -EINVAL;
 
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
 
        mutex_unlock(&sysfs_lock);
 
-done:
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
        struct device           *dev = NULL;
        int                     status = -EINVAL;
 
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 unlock:
        mutex_unlock(&sysfs_lock);
 
-done:
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
        struct device           *dev = NULL;
 
        if (!desc) {
-               status = -EINVAL;
-               goto done;
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return;
        }
 
        mutex_lock(&sysfs_lock);
@@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
                device_unregister(dev);
                put_device(dev);
        }
-done:
+
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        int                     status = -EPROBE_DEFER;
        unsigned long           flags;
 
-       spin_lock_irqsave(&gpio_lock, flags);
-
        if (!desc) {
-               status = -EINVAL;
-               goto done;
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
        }
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
        chip = desc->chip;
        if (chip == NULL)
                goto done;
@@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
 done:
        if (status)
                pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
-                        desc ? desc_to_gpio(desc) : -1,
-                        label ? : "?", status);
+                        desc_to_gpio(desc), label ? : "?", status);
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
        int                     status = -EINVAL;
        int                     offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->get || !chip->direction_input)
                goto fail;
@@ -1655,13 +1670,9 @@ lose:
        return status;
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
        return status;
 }
 
@@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
        int                     status = -EINVAL;
        int offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        /* Open drain pin should not be driven to 1 */
        if (value && test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
                return gpiod_direction_input(desc);
@@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->direction_output)
                goto fail;
@@ -1725,13 +1739,9 @@ lose:
        return status;
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
        return status;
 }
 
@@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
        int                     status = -EINVAL;
        int                     offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->set_debounce)
                goto fail;
@@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
 
        return status;
 }
@@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
  * It returns the zero or nonzero value provided by the associated
  * gpio_chip.get() method; or zero if no such method is provided.
  */
-static int gpiod_get_value(struct gpio_desc *desc)
+static int gpiod_get_value(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int value;
        int offset;
 
+       if (!desc)
+               return 0;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        /* Should be using gpio_get_value_cansleep() */
@@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        struct gpio_chip        *chip;
 
+       if (!desc)
+               return;
        chip = desc->chip;
        /* Should be using gpio_set_value_cansleep() */
        WARN_ON(chip->can_sleep);
@@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
  * This is used directly or indirectly to implement gpio_cansleep().  It
  * returns nonzero if access reading or writing the GPIO value can sleep.
  */
-static int gpiod_cansleep(struct gpio_desc *desc)
+static int gpiod_cansleep(const struct gpio_desc *desc)
 {
+       if (!desc)
+               return 0;
        /* only call this on GPIOs that are valid! */
        return desc->chip->can_sleep;
 }
@@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
  * It returns the number of the IRQ signaled by this (input) GPIO,
  * or a negative errno.
  */
-static int gpiod_to_irq(struct gpio_desc *desc)
+static int gpiod_to_irq(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int                     offset;
 
+       if (!desc)
+               return -EINVAL;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
  * Common examples include ones connected to I2C or SPI chips.
  */
 
-static int gpiod_get_value_cansleep(struct gpio_desc *desc)
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int value;
        int offset;
 
        might_sleep_if(extra_checks);
+       if (!desc)
+               return 0;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        value = chip->get ? chip->get(chip, offset) : 0;
@@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
        struct gpio_chip        *chip;
 
        might_sleep_if(extra_checks);
+       if (!desc)
+               return;
        chip = desc->chip;
        trace_gpio_value(desc_to_gpio(desc), 0, value);
        if (test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
index c5b8c81..0a8eceb 100644 (file)
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = {         /* aka */
        INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
        INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
        INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
-       INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+       INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+       INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
        INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
-       INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
-       INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+       INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+       INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
        INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
-       INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
-       INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+       INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+       INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
        INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
-       INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
        INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
                intel_modeset_disable(dev);
 
                drm_irq_uninstall(dev);
+               dev_priv->enable_hotplug_processing = false;
        }
 
        i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
 
+               /* We need working interrupts for modeset enabling ... */
+               drm_irq_install(dev);
+
                intel_modeset_init_hw(dev);
                intel_modeset_setup_hw_state(dev, false);
-               drm_irq_install(dev);
+
+               /*
+                * ... but also need to make sure that hotplug processing
+                * doesn't cause havoc. Like in the driver load code we don't
+                * bother with the tiny race here where we might lose hotplug
+                * notifications.
+                */
                intel_hpd_init(dev);
+               dev_priv->enable_hotplug_processing = true;
        }
 
        intel_opregion_init(dev);
index 2cd97d1..3c7bb04 100644 (file)
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 de_iir, gt_iir, de_ier, pm_iir;
+       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
        irqreturn_t ret = IRQ_NONE;
        int i;
 
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 
+       /* Disable south interrupts. We'll only write to SDEIIR once, so further
+        * interrupts will be stored on its back queue, and then we'll be
+        * able to process them after we restore SDEIER (as soon as we restore
+        * it, we'll get an interrupt if SDEIIR still has something to process
+        * due to its back queue). */
+       sde_ier = I915_READ(SDEIER);
+       I915_WRITE(SDEIER, 0);
+       POSTING_READ(SDEIER);
+
        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
+       I915_WRITE(SDEIER, sde_ier);
+       POSTING_READ(SDEIER);
 
        return ret;
 }
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
-       u32 de_iir, gt_iir, de_ier, pm_iir;
+       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 
        atomic_inc(&dev_priv->irq_received);
 
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);
 
+       /* Disable south interrupts. We'll only write to SDEIIR once, so further
+        * interrupts will be stored on its back queue, and then we'll be
+        * able to process them after we restore SDEIER (as soon as we restore
+        * it, we'll get an interrupt if SDEIIR still has something to process
+        * due to its back queue). */
+       sde_ier = I915_READ(SDEIER);
+       I915_WRITE(SDEIER, 0);
+       POSTING_READ(SDEIER);
+
        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 done:
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
+       I915_WRITE(SDEIER, sde_ier);
+       POSTING_READ(SDEIER);
 
        return ret;
 }
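
Both IRQ handlers in the hunks above add the same save/mask/handle/restore dance around the south-bridge interrupt enable register, so events that arrive while SDEIIR is being drained queue up and re-raise the interrupt once SDEIER is restored. A stripped-down skeleton of that ordering; the accessors and the initial value are stand-ins, not the i915 MMIO API:

/*
 * Ordering skeleton only: remember the enable bits, mask so new events queue
 * up, drain what is already latched, then unmask so anything queued re-fires.
 * The helpers below stand in for I915_READ/I915_WRITE plus POSTING_READ.
 */
#include <stdint.h>

static uint32_t sdeier_reg = 0xffffffff;	/* pretend all sources enabled */

static uint32_t read_sdeier(void)          { return sdeier_reg; }
static void     write_sdeier(uint32_t v)   { sdeier_reg = v; }
static void     handle_latched_irqs(void)  { /* drain DEIIR/GTIIR/SDEIIR here */ }

static void irq_handler_skeleton(void)
{
	uint32_t saved = read_sdeier();	/* 1. remember which sources were enabled */

	write_sdeier(0);		/* 2. mask: further south events queue up */
	(void)read_sdeier();		/*    posting read, as in the patch */

	handle_latched_irqs();		/* 3. process what was already pending */

	write_sdeier(saved);		/* 4. unmask; a queued event re-raises the IRQ */
	(void)read_sdeier();
}
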
index 527b664..848992f 100644 (file)
 #define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY 0
-#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
 #define   ADPA_VSYNC_CNTL_ENABLE 0
-#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
 #define   ADPA_HSYNC_CNTL_ENABLE 0
 #define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
 #define   ADPA_VSYNC_ACTIVE_LOW        0
index 969d08c..32a3693 100644 (file)
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
        u32 temp;
 
        temp = I915_READ(crt->adpa_reg);
-       temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+       temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
        temp &= ~ADPA_DAC_ENABLE;
        I915_WRITE(crt->adpa_reg, temp);
 }
index d64af5a..8d0bac3 100644 (file)
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_i915_private *dev_priv = encoder->dev->dev_private;
        enum port port = intel_dig_port->port;
-       bool wait;
        uint32_t val;
+       bool wait = false;
 
        if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
                val = I915_READ(DDI_BUF_CTL(port));
index a05ac2c..287b42c 100644 (file)
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
         */
 }
 
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       u32 cntl = I915_READ(CURCNTR(pipe));
+
+       if ((cntl & CURSOR_MODE) == 0) {
+               u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+               I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+               I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+               intel_wait_for_vblank(dev_priv->dev, pipe);
+               I915_WRITE(CURCNTR(pipe), cntl);
+               I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+               I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+       }
+}
+
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
        intel_enable_pipe(dev_priv, pipe, false);
        intel_enable_plane(dev_priv, plane, pipe);
+       if (IS_G4X(dev))
+               g4x_fixup_plane(dev_priv, pipe);
 
        intel_crtc_load_lut(crtc);
        intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_framebuffer *intel_fb;
-       struct drm_i915_gem_object *obj;
+       struct drm_framebuffer *old_fb = crtc->fb;
+       struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        work->event = event;
        work->crtc = crtc;
-       intel_fb = to_intel_framebuffer(crtc->fb);
-       work->old_fb_obj = intel_fb->obj;
+       work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
 
        ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        intel_crtc->unpin_work = work;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       intel_fb = to_intel_framebuffer(fb);
-       obj = intel_fb->obj;
-
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);
 
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
+       crtc->fb = old_fb;
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
index f61cb79..6f728e5 100644 (file)
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
-               done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+               done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+                                         msecs_to_jiffies(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
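
The intel_dp fix above matters because wait_event_timeout() counts its timeout in jiffies: a bare 10 is ten scheduler ticks, which is 10 ms only at HZ=1000 and as much as 100 ms at HZ=100. A tiny standalone re-derivation of the conversion the patch switches to; the real msecs_to_jiffies() lives in <linux/jiffies.h> and this round-up is only an approximation of it:

/*
 * Why the raw "10" was wrong: a jiffy is 1/HZ seconds, so the real-time
 * meaning of a jiffies count depends on the kernel's HZ setting.
 */
#include <stdio.h>

static unsigned long ms_to_jiffies(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;	/* round up, never 0 for ms > 0 */
}

int main(void)
{
	unsigned int hz_values[] = { 100, 250, 1000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("HZ=%-4u: 10 ms -> %lu jiffies, but a raw 10 jiffies -> %u ms\n",
		       hz_values[i], ms_to_jiffies(10, hz_values[i]),
		       10 * 1000 / hz_values[i]);
	return 0;
}
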
index 61fee7f..a1794c6 100644 (file)
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
        I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-       I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+       I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
        /* Check if we are enabling RC6 */
index 5ea5033..4d932c4 100644 (file)
@@ -112,7 +112,6 @@ struct mga_framebuffer {
 struct mga_fbdev {
        struct drm_fb_helper helper;
        struct mga_framebuffer mfb;
-       struct list_head fbdev_list;
        void *sysram;
        int size;
        struct ttm_bo_kmap_obj mapping;
index 5a88ec5..d3dcf54 100644 (file)
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
        int ret;
        int data, clock;
 
+       WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
        WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
        WREG_DAC(MGA1064_GEN_IO_CTL, 0);
 
index d3d99a2..a274b99 100644 (file)
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
 static int mga_vga_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
+       struct drm_device *dev = connector->dev;
+       struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+       struct mga_fbdev *mfbdev = mdev->mfbdev;
+       struct drm_fb_helper *fb_helper = &mfbdev->helper;
+       struct drm_fb_helper_connector *fb_helper_conn = NULL;
+       int bpp = 32;
+       int i = 0;
+
        /* FIXME: Add bandwidth and g200se limitations */
 
        if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
                return MODE_BAD;
        }
 
+       /* Validate the mode input by the user */
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               if (fb_helper->connector_info[i]->connector == connector) {
+                       /* Found the helper for this connector */
+                       fb_helper_conn = fb_helper->connector_info[i];
+                       if (fb_helper_conn->cmdline_mode.specified) {
+                               if (fb_helper_conn->cmdline_mode.bpp_specified) {
+                                       bpp = fb_helper_conn->cmdline_mode.bpp;
+                               }
+                       }
+               }
+       }
+
+       if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+               if (fb_helper_conn)
+                       fb_helper_conn->cmdline_mode.specified = false;
+               return MODE_BAD;
+       }
+
        return MODE_OK;
 }
 
index 61cec0f..4857f91 100644 (file)
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
                nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
        }
 
-       nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+       nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
        nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
 }
 
index 2cc1e6a..9c41b58 100644 (file)
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
                init->offset += 2;
 
                init_wr32(init, dreg, idata);
-               init_mask(init, creg, ~mask, data | idata);
+               init_mask(init, creg, ~mask, data | iaddr);
        }
 }
 
index a114a0e..2e98e8a 100644 (file)
@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
        /* drop port's i2c subdev refcount, i2c handles this itself */
        if (ret == 0) {
                list_add_tail(&port->head, &i2c->ports);
+               atomic_dec(&parent->refcount);
                atomic_dec(&engine->refcount);
        }
 
index d28430c..6e7a55f 100644 (file)
@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
        if (drm->agp.stat == UNKNOWN) {
                if (!nouveau_agpmode)
                        return false;
+#ifdef __powerpc__
+               /* Disable AGP by default on all PowerPC machines for
+                * now -- At least some UniNorth-2 AGP bridges are
+                * known to be broken: DMA from the host to the card
+                * works just fine, but writeback from the card to the
+                * host goes straight to memory untranslated bypassing
+                * the GATT somehow, making them quite painful to deal
+                * with...
+                */
+               if (nouveau_agpmode == -1)
+                       return false;
+#endif
                return true;
        }
 
index a6237c9..87a5a56 100644 (file)
@@ -55,9 +55,9 @@
 
 /* offsets in shared sync bo of various structures */
 #define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
-#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
 
 #define EVO_CORE_HANDLE      (0xd1500000)
 #define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
@@ -341,10 +341,8 @@ struct nv50_curs {
 
 struct nv50_sync {
        struct nv50_dmac base;
-       struct {
-               u32 offset;
-               u16 value;
-       } sem;
+       u32 addr;
+       u32 data;
 };
 
 struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
        return nv50_disp(dev)->sync;
 }
 
+struct nv50_display_flip {
+       struct nv50_disp *disp;
+       struct nv50_sync *chan;
+};
+
+static bool
+nv50_display_flip_wait(void *data)
+{
+       struct nv50_display_flip *flip = data;
+       if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
+                                             flip->chan->data)
+               return true;
+       usleep_range(1, 2);
+       return false;
+}
+
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
-       struct nv50_sync *sync = nv50_sync(crtc);
+       struct nouveau_device *device = nouveau_dev(crtc->dev);
+       struct nv50_display_flip flip = {
+               .disp = nv50_disp(crtc->dev),
+               .chan = nv50_sync(crtc),
+       };
        u32 *push;
 
-       push = evo_wait(sync, 8);
+       push = evo_wait(flip.chan, 8);
        if (push) {
                evo_mthd(push, 0x0084, 1);
                evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x0080, 1);
                evo_data(push, 0x00000000);
-               evo_kick(push, sync);
+               evo_kick(push, flip.chan);
        }
+
+       nv_wait_cb(device, nv50_display_flip_wait, &flip);
 }
 
 int
@@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                       struct nouveau_channel *chan, u32 swap_interval)
 {
        struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-       struct nv50_disp *disp = nv50_disp(crtc->dev);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nv50_sync *sync = nv50_sync(crtc);
+       int head = nv_crtc->index, ret;
        u32 *push;
-       int ret;
 
        swap_interval <<= 4;
        if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        if (unlikely(push == NULL))
                return -EBUSY;
 
-       /* synchronise with the rendering channel, if necessary */
-       if (likely(chan)) {
+       if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+               ret = RING_SPACE(chan, 8);
+               if (ret)
+                       return ret;
+
+               BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+               OUT_RING  (chan, NvEvoSema0 + head);
+               OUT_RING  (chan, sync->addr ^ 0x10);
+               BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+               OUT_RING  (chan, sync->data + 1);
+               BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+               OUT_RING  (chan, sync->addr);
+               OUT_RING  (chan, sync->data);
+       } else
+       if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+               u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+               ret = RING_SPACE(chan, 12);
+               if (ret)
+                       return ret;
+
+               BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+               OUT_RING  (chan, chan->vram);
+               BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+               OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
+               OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
+               OUT_RING  (chan, sync->data + 1);
+               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+               BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+               OUT_RING  (chan, upper_32_bits(addr));
+               OUT_RING  (chan, lower_32_bits(addr));
+               OUT_RING  (chan, sync->data);
+               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
+       } else
+       if (chan) {
+               u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
                ret = RING_SPACE(chan, 10);
                if (ret)
                        return ret;
 
-               if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
-                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-                       OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
-                       OUT_RING  (chan, sync->sem.offset);
-                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
-                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
-                       OUT_RING  (chan, sync->sem.offset ^ 0x10);
-                       OUT_RING  (chan, 0x74b1e000);
-                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-                       OUT_RING  (chan, NvSema);
-               } else
-               if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
-                       u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
-                       offset += sync->sem.offset;
-
-                       BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                       OUT_RING  (chan, upper_32_bits(offset));
-                       OUT_RING  (chan, lower_32_bits(offset));
-                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
-                       OUT_RING  (chan, 0x00000002);
-                       BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                       OUT_RING  (chan, upper_32_bits(offset));
-                       OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-                       OUT_RING  (chan, 0x74b1e000);
-                       OUT_RING  (chan, 0x00000001);
-               } else {
-                       u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
-                       offset += sync->sem.offset;
-
-                       BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                       OUT_RING  (chan, upper_32_bits(offset));
-                       OUT_RING  (chan, lower_32_bits(offset));
-                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
-                       OUT_RING  (chan, 0x00001002);
-                       BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                       OUT_RING  (chan, upper_32_bits(offset));
-                       OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-                       OUT_RING  (chan, 0x74b1e000);
-                       OUT_RING  (chan, 0x00001001);
-               }
+               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+               OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
+               OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
+               OUT_RING  (chan, sync->data + 1);
+               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
+                                NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+               OUT_RING  (chan, upper_32_bits(addr));
+               OUT_RING  (chan, lower_32_bits(addr));
+               OUT_RING  (chan, sync->data);
+               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
+                                NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+       }
 
+       if (chan) {
+               sync->addr ^= 0x10;
+               sync->data++;
                FIRE_RING (chan);
        } else {
-               nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
-                               0xf00d0000 | sync->sem.value);
                evo_sync(crtc->dev);
        }
 
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                evo_data(push, 0x40000000);
        }
        evo_mthd(push, 0x0088, 4);
-       evo_data(push, sync->sem.offset);
-       evo_data(push, 0xf00d0000 | sync->sem.value);
-       evo_data(push, 0x74b1e000);
+       evo_data(push, sync->addr);
+       evo_data(push, sync->data++);
+       evo_data(push, sync->data);
        evo_data(push, NvEvoSync);
        evo_mthd(push, 0x00a0, 2);
        evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        evo_mthd(push, 0x0080, 1);
        evo_data(push, 0x00000000);
        evo_kick(push, sync);
-
-       sync->sem.offset ^= 0x10;
-       sync->sem.value++;
        return 0;
 }
 
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
        if (ret)
                goto out;
 
-       head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+       head->sync.addr = EVO_FLIP_SEM0(index);
+       head->sync.data = 0x00000000;
 
        /* allocate overlay resources */
        ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
 int
 nv50_display_init(struct drm_device *dev)
 {
-       u32 *push = evo_wait(nv50_mast(dev), 32);
-       if (push) {
-               evo_mthd(push, 0x0088, 1);
-               evo_data(push, NvEvoSync);
-               evo_kick(push, nv50_mast(dev));
-               return 0;
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct drm_crtc *crtc;
+       u32 *push;
+
+       push = evo_wait(nv50_mast(dev), 32);
+       if (!push)
+               return -EBUSY;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct nv50_sync *sync = nv50_sync(crtc);
+               nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
        }
 
-       return -EBUSY;
+       evo_mthd(push, 0x0088, 1);
+       evo_data(push, NvEvoSync);
+       evo_kick(push, nv50_mast(dev));
+       return 0;
 }
 
 void
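
The nv50_display changes above make flip-stop wait, via nv_wait_cb(), for the flip semaphore in the shared sync buffer to reach the expected value before returning. A generic user-space sketch of that poll-with-short-sleep loop; the callback type, the roughly 2 us nap and the time budget are illustrative, not the nouveau helpers:

/*
 * Generic poll-until-condition loop in the spirit of the flip-stop wait:
 * check a callback, nap briefly, give up after a budget.  nanosleep() stands
 * in for usleep_range(); everything here is a sketch, not nouveau code.
 */
#include <stdbool.h>
#include <time.h>

static bool poll_until(bool (*cond)(void *), void *data, long budget_us)
{
	const struct timespec nap = { .tv_sec = 0, .tv_nsec = 2000 };	/* ~2 us */

	while (budget_us > 0) {
		if (cond(data))
			return true;	/* e.g. semaphore reached chan->data */
		nanosleep(&nap, NULL);
		budget_us -= 2;
	}
	return cond(data);		/* one final check before giving up */
}
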
index 3c38ea4..305a657 100644 (file)
@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
        if (tmp & L2_BUSY)
                reset_mask |= RADEON_RESET_VMC;
 
+       /* Skip MC reset as it's most likely not hung, just busy */
+       if (reset_mask & RADEON_RESET_MC) {
+               DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+               reset_mask &= ~RADEON_RESET_MC;
+       }
+
        return reset_mask;
 }
 
index 99fb132..eb8ac31 100644 (file)
@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
                         __func__, __LINE__, toffset, surf.base_align);
                return -EINVAL;
        }
-       if (moffset & (surf.base_align - 1)) {
+       if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
                dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
                         __func__, __LINE__, moffset, surf.base_align);
                return -EINVAL;
index 7cead76..d4c633e 100644 (file)
@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
        if (tmp & L2_BUSY)
                reset_mask |= RADEON_RESET_VMC;
 
+       /* Skip MC reset as it's most likely not hung, just busy */
+       if (reset_mask & RADEON_RESET_MC) {
+               DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+               reset_mask &= ~RADEON_RESET_MC;
+       }
+
        return reset_mask;
 }
 
index 6d4b561..0740db3 100644 (file)
@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
        if (r600_is_display_hung(rdev))
                reset_mask |= RADEON_RESET_DISPLAY;
 
+       /* Skip MC reset as it's most likely not hung, just busy */
+       if (reset_mask & RADEON_RESET_MC) {
+               DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+               reset_mask &= ~RADEON_RESET_MC;
+       }
+
        return reset_mask;
 }
 
index 3e403bd..78edadc 100644 (file)
@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
                        found = 1;
        }
 
+       /* quirks */
+       /* Radeon 9100 (R200) */
+       if ((dev->pdev->device == 0x514D) &&
+           (dev->pdev->subsystem_vendor == 0x174B) &&
+           (dev->pdev->subsystem_device == 0x7149)) {
+               /* vbios value is bad, use the default */
+               found = 0;
+       }
+
        if (!found) /* fallback to defaults */
                radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
 
index 1677584..66a7f0f 100644 (file)
  *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
  *   2.28.0 - r600-eg: Add MEM_WRITE packet support
  *   2.29.0 - R500 FP16 color clear registers
+ *   2.30.0 - fix for FMASK texturing
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       29
+#define KMS_DRIVER_MINOR       30
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 90374dd..48f80cd 100644 (file)
@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
 {
        unsigned long irqflags;
 
+       if (!rdev->ddev->irq_enabled)
+               return;
+
        spin_lock_irqsave(&rdev->irq.lock, irqflags);
        rdev->irq.afmt[block] = true;
        radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
 {
        unsigned long irqflags;
 
+       if (!rdev->ddev->irq_enabled)
+               return;
+
        spin_lock_irqsave(&rdev->irq.lock, irqflags);
        rdev->irq.afmt[block] = false;
        radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
        unsigned long irqflags;
        int i;
 
+       if (!rdev->ddev->irq_enabled)
+               return;
+
        spin_lock_irqsave(&rdev->irq.lock, irqflags);
        for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
                rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
        unsigned long irqflags;
        int i;
 
+       if (!rdev->ddev->irq_enabled)
+               return;
+
        spin_lock_irqsave(&rdev->irq.lock, irqflags);
        for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
                rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
index 80979ed..9128120 100644 (file)
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
        if (tmp & L2_BUSY)
                reset_mask |= RADEON_RESET_VMC;
 
+       /* Skip MC reset as it's most likely not hung, just busy */
+       if (reset_mask & RADEON_RESET_MC) {
+               DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+               reset_mask &= ~RADEON_RESET_MC;
+       }
+
        return reset_mask;
 }
 
index c92955d..be1daf7 100644 (file)
@@ -4,7 +4,6 @@ config DRM_TEGRA
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
-       select DRM_HDMI
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 9652a2c..a58de38 100644 (file)
@@ -62,7 +62,7 @@ struct ltc2978_data {
        int temp_min, temp_max;
        int vout_min[8], vout_max[8];
        int iout_max[2];
-       int temp2_max[2];
+       int temp2_max;
        struct pmbus_driver_info info;
 };
 
@@ -204,10 +204,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
                ret = pmbus_read_word_data(client, page,
                                           LTC3880_MFR_TEMPERATURE2_PEAK);
                if (ret >= 0) {
-                       if (lin11_to_val(ret)
-                           > lin11_to_val(data->temp2_max[page]))
-                               data->temp2_max[page] = ret;
-                       ret = data->temp2_max[page];
+                       if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
+                               data->temp2_max = ret;
+                       ret = data->temp2_max;
                }
                break;
        case PMBUS_VIRT_READ_VIN_MIN:
@@ -248,11 +247,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
 
        switch (reg) {
        case PMBUS_VIRT_RESET_IOUT_HISTORY:
-               data->iout_max[page] = 0x7fff;
+               data->iout_max[page] = 0x7c00;
                ret = ltc2978_clear_peaks(client, page, data->id);
                break;
        case PMBUS_VIRT_RESET_TEMP2_HISTORY:
-               data->temp2_max[page] = 0x7fff;
+               data->temp2_max = 0x7c00;
                ret = ltc2978_clear_peaks(client, page, data->id);
                break;
        case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -262,12 +261,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
                break;
        case PMBUS_VIRT_RESET_VIN_HISTORY:
                data->vin_min = 0x7bff;
-               data->vin_max = 0;
+               data->vin_max = 0x7c00;
                ret = ltc2978_clear_peaks(client, page, data->id);
                break;
        case PMBUS_VIRT_RESET_TEMP_HISTORY:
                data->temp_min = 0x7bff;
-               data->temp_max = 0x7fff;
+               data->temp_max = 0x7c00;
                ret = ltc2978_clear_peaks(client, page, data->id);
                break;
        default:
@@ -321,12 +320,13 @@ static int ltc2978_probe(struct i2c_client *client,
        info = &data->info;
        info->write_word_data = ltc2978_write_word_data;
 
-       data->vout_min[0] = 0xffff;
        data->vin_min = 0x7bff;
+       data->vin_max = 0x7c00;
        data->temp_min = 0x7bff;
-       data->temp_max = 0x7fff;
+       data->temp_max = 0x7c00;
+       data->temp2_max = 0x7c00;
 
-       switch (id->driver_data) {
+       switch (data->id) {
        case ltc2978:
                info->read_word_data = ltc2978_read_word_data;
                info->pages = 8;
@@ -336,7 +336,6 @@ static int ltc2978_probe(struct i2c_client *client,
                for (i = 1; i < 8; i++) {
                        info->func[i] = PMBUS_HAVE_VOUT
                          | PMBUS_HAVE_STATUS_VOUT;
-                       data->vout_min[i] = 0xffff;
                }
                break;
        case ltc3880:
@@ -352,11 +351,14 @@ static int ltc2978_probe(struct i2c_client *client,
                  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
                  | PMBUS_HAVE_POUT
                  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
-               data->vout_min[1] = 0xffff;
+               data->iout_max[0] = 0x7c00;
+               data->iout_max[1] = 0x7c00;
                break;
        default:
                return -ENODEV;
        }
+       for (i = 0; i < info->pages; i++)
+               data->vout_min[i] = 0xffff;
 
        return pmbus_do_probe(client, id, info);
 }
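The 0x7bff/0x7c00 sentinels used above are the two extreme PMBus LINEAR11 code words: 0x7bff decodes to the largest representable value (a safe initial minimum) and 0x7c00 to the smallest (a safe initial maximum). A minimal user-space sketch, assuming the standard LINEAR11 layout of a 5-bit signed exponent in bits 15..11 and an 11-bit signed mantissa in bits 10..0; lin11_to_long() is illustrative only, not the driver's lin11_to_val():

#include <stdio.h>

/* Decode a PMBus LINEAR11 word into a plain integer (sketch). */
static long lin11_to_long(unsigned int word)
{
	int exp  = (word >> 11) & 0x1f;   /* 5-bit signed exponent  */
	int mant = word & 0x7ff;          /* 11-bit signed mantissa */

	if (exp & 0x10)
		exp -= 0x20;              /* sign-extend the exponent */
	if (mant & 0x400)
		mant -= 0x800;            /* sign-extend the mantissa */

	if (exp >= 0)
		return (long)mant * (1L << exp);
	return (long)mant / (1L << -exp);
}

int main(void)
{
	printf("0x7bff -> %ld\n", lin11_to_long(0x7bff)); /*  1023 * 2^15 =  33521664 */
	printf("0x7c00 -> %ld\n", lin11_to_long(0x7c00)); /* -1024 * 2^15 = -33554432 */
	return 0;
}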
index bfe326e..2507f90 100644 (file)
@@ -965,7 +965,13 @@ static int sht15_probe(struct platform_device *pdev)
                if (voltage)
                        data->supply_uv = voltage;
 
-               regulator_enable(data->reg);
+               ret = regulator_enable(data->reg);
+               if (ret != 0) {
+                       dev_err(&pdev->dev,
+                               "failed to enable regulator: %d\n", ret);
+                       return ret;
+               }
+
                /*
                 * Setup a notifier block to update this if another device
                 * causes the voltage to change
index 017c67e..ead0a4f 100644 (file)
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
        // Allocate URBs and buffers for interrupt endpoint
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
-               return -ENOMEM;
+               goto err1;
        }
        intr->urb = urb;
 
        buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
        if (!buf) {
-               return -ENOMEM;
+               goto err2;
        }
 
        endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
                         endpoint->desc.bInterval);
 
        return 0;
+err2:
+       usb_free_urb(intr->urb);
+       intr->urb = NULL;
+err1:
+       usb_free_urb(ctrl->urb);
+       ctrl->urb = NULL;
+
+       return -ENOMEM;
 }
 
 /*
index c45b3ae..d873cba 100644 (file)
@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
 
-static int __init pl320_probe(struct amba_device *adev,
-                               const struct amba_id *id)
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
 {
        int ret;
 
index e30b490..4d8d90b 100644 (file)
@@ -154,17 +154,6 @@ config MD_RAID456
 
          If unsure, say Y.
 
-config MULTICORE_RAID456
-       bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
-       depends on MD_RAID456
-       depends on SMP
-       depends on EXPERIMENTAL
-       ---help---
-         Enable the raid456 module to dispatch per-stripe raid operations to a
-         thread pool.
-
-         If unsure, say N.
-
 config MD_MULTIPATH
        tristate "Multipath I/O support"
        depends on BLK_DEV_MD
index 9a01d1e..311e3d3 100644 (file)
@@ -91,15 +91,44 @@ static struct raid_type {
        {"raid6_nc", "RAID6 (N continue)",              2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
 };
 
+static char *raid10_md_layout_to_format(int layout)
+{
+       /*
+        * Bit 16 and 17 stand for "offset" and "use_far_sets"
+        * Refer to MD's raid10.c for details
+        */
+       if ((layout & 0x10000) && (layout & 0x20000))
+               return "offset";
+
+       if ((layout & 0xFF) > 1)
+               return "near";
+
+       return "far";
+}
+
 static unsigned raid10_md_layout_to_copies(int layout)
 {
-       return layout & 0xFF;
+       if ((layout & 0xFF) > 1)
+               return layout & 0xFF;
+       return (layout >> 8) & 0xFF;
 }
 
 static int raid10_format_to_md_layout(char *format, unsigned copies)
 {
-       /* 1 "far" copy, and 'copies' "near" copies */
-       return (1 << 8) | (copies & 0xFF);
+       unsigned n = 1, f = 1;
+
+       if (!strcmp("near", format))
+               n = copies;
+       else
+               f = copies;
+
+       if (!strcmp("offset", format))
+               return 0x30000 | (f << 8) | n;
+
+       if (!strcmp("far", format))
+               return 0x20000 | (f << 8) | n;
+
+       return (f << 8) | n;
 }
 
 static struct raid_type *get_raid_type(char *name)
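A stand-alone sketch may make the layout bit fields used by the two helpers above easier to follow; format_to_layout() below is an illustrative copy of raid10_format_to_md_layout(), not dm-raid code:

#include <stdio.h>
#include <string.h>

/* Build an MD raid10 layout word from a dm-raid format name (sketch). */
static int format_to_layout(const char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcmp("offset", format))
		return 0x30000 | (f << 8) | n;
	if (!strcmp("far", format))
		return 0x20000 | (f << 8) | n;
	return (f << 8) | n;
}

int main(void)
{
	printf("near   2 -> 0x%05x\n", format_to_layout("near", 2));   /* 0x00102 */
	printf("far    2 -> 0x%05x\n", format_to_layout("far", 2));    /* 0x20201 */
	printf("offset 2 -> 0x%05x\n", format_to_layout("offset", 2)); /* 0x30201 */
	return 0;
}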
@@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group, copies, d;
+       unsigned group_size, last_group_start;
 
        for (i = 0; i < rs->md.raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 * as long as the failed devices occur in different mirror
                 * groups (i.e. different stripes).
                 *
-                * Right now, we only allow for "near" copies.  When other
-                * formats are added, we will have to check those too.
-                *
                 * When checking "near" format, make sure no adjacent devices
                 * have failed beyond what can be handled.  In addition to the
                 * simple case where the number of devices is a multiple of the
@@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
-               for (i = 0; i < rs->md.raid_disks * copies; i++) {
-                       if (!(i % copies))
+               if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
+                       for (i = 0; i < rs->md.raid_disks * copies; i++) {
+                               if (!(i % copies))
+                                       rebuilds_per_group = 0;
+                               d = i % rs->md.raid_disks;
+                               if ((!rs->dev[d].rdev.sb_page ||
+                                    !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+                                   (++rebuilds_per_group >= copies))
+                                       goto too_many;
+                       }
+                       break;
+               }
+
+               /*
+                * When checking "far" and "offset" formats, we need to ensure
+                * that the device that holds its copy is not also dead or
+                * being rebuilt.  (Note that "far" and "offset" formats only
+                * support two copies right now.  These formats also only ever
+                * use the 'use_far_sets' variant.)
+                *
+                * This check is somewhat complicated by the need to account
+                * for arrays that are not a multiple of (far) copies.  This
+                * results in the need to treat the last (potentially larger)
+                * set differently.
+                */
+               group_size = (rs->md.raid_disks / copies);
+               last_group_start = (rs->md.raid_disks / group_size) - 1;
+               last_group_start *= group_size;
+               for (i = 0; i < rs->md.raid_disks; i++) {
+                       if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
-                       d = i % rs->md.raid_disks;
-                       if ((!rs->dev[d].rdev.sb_page ||
-                            !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+                       if ((!rs->dev[i].rdev.sb_page ||
+                            !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
-                               goto too_many;
+                                       goto too_many;
                }
                break;
        default:
@@ -433,7 +487,7 @@ too_many:
  *
  * RAID10-only options:
  *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
- *    [raid10_format <near>]            Layout algorithm.  (Default: near)
+ *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
  */
 static int parse_raid_params(struct raid_set *rs, char **argv,
                             unsigned num_raid_params)
@@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
                                return -EINVAL;
                        }
-                       if (strcmp("near", argv[i])) {
+                       if (strcmp("near", argv[i]) &&
+                           strcmp("far", argv[i]) &&
+                           strcmp("offset", argv[i])) {
                                rs->ti->error = "Invalid 'raid10_format' value given";
                                return -EINVAL;
                        }
@@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                        return -EINVAL;
                }
 
+               /*
+                * If the format is not "near", we only support
+                * two copies at the moment.
+                */
+               if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
+                       rs->ti->error = "Too many copies for given RAID10 format.";
+                       return -EINVAL;
+               }
+
                /* (Len * #mirrors) / #devices */
                sectors_per_dev = rs->ti->len * raid10_copies;
                sector_div(sectors_per_dev, rs->md.raid_disks);
@@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
        /*
         * Reshaping is not currently allowed
         */
-       if ((le32_to_cpu(sb->level) != mddev->level) ||
-           (le32_to_cpu(sb->layout) != mddev->layout) ||
-           (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
-               DMERR("Reshaping arrays not yet supported.");
+       if (le32_to_cpu(sb->level) != mddev->level) {
+               DMERR("Reshaping arrays not yet supported. (RAID level change)");
+               return -EINVAL;
+       }
+       if (le32_to_cpu(sb->layout) != mddev->layout) {
+               DMERR("Reshaping arrays not yet supported. (RAID layout change)");
+               DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+               DMERR("  Old layout: %s w/ %d copies",
+                     raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+                     raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+               DMERR("  New layout: %s w/ %d copies",
+                     raid10_md_layout_to_format(mddev->layout),
+                     raid10_md_layout_to_copies(mddev->layout));
+               return -EINVAL;
+       }
+       if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+               DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
                return -EINVAL;
        }
 
        /* We can only change the number of devices in RAID1 right now */
        if ((rs->raid_type->level != 1) &&
            (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
-               DMERR("Reshaping arrays not yet supported.");
+               DMERR("Reshaping arrays not yet supported. (device count change)");
                return -EINVAL;
        }
 
@@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                               raid10_md_layout_to_copies(rs->md.layout));
 
                if (rs->print_flags & DMPF_RAID10_FORMAT)
-                       DMEMIT(" raid10_format near");
+                       DMEMIT(" raid10_format %s",
+                              raid10_md_layout_to_format(rs->md.layout));
 
                DMEMIT(" %d", rs->md.raid_disks);
                for (i = 0; i < rs->md.raid_disks; i++) {
@@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
 
 static int __init dm_raid_init(void)
 {
+       DMINFO("Loading target version %u.%u.%u",
+              raid_target.version[0],
+              raid_target.version[1],
+              raid_target.version[2]);
        return dm_register_target(&raid_target);
 }
 
index 3db3d1b..fcb878f 100644 (file)
@@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
                bio_io_error(bio);
                return;
        }
+       if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+               bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+               return;
+       }
        smp_rmb(); /* Ensure implications of  'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
@@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
                } else if (!sectors)
                        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                                rdev->data_offset;
+               if (!my_mddev->pers->resize)
+                       /* Cannot change size for RAID0 or Linear etc */
+                       return -EINVAL;
        }
        if (sectors < my_mddev->dev_sectors)
                return -EINVAL; /* component must fit device */
@@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        mddev->ro = 0;
                        sysfs_notify_dirent_safe(mddev->sysfs_state);
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                       md_wakeup_thread(mddev->thread);
+                       /* mddev_unlock will wake thread */
+                       /* If a device failed while we were read-only, we
+                        * need to make sure the metadata is updated now.
+                        */
+                       if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+                               mddev_unlock(mddev);
+                               wait_event(mddev->sb_wait,
+                                          !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
+                                          !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                               mddev_lock(mddev);
+                       }
                } else {
                        err = -EROFS;
                        goto abort_unlock;
index 24b3597..0505452 100644 (file)
@@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                        rdev1->new_raid_disk = j;
                }
 
-               if (j < 0 || j >= mddev->raid_disks) {
+               if (j < 0) {
+                       printk(KERN_ERR
+                              "md/raid0:%s: remove inactive devices before converting to RAID0\n",
+                              mdname(mddev));
+                       goto abort;
+               }
+               if (j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
@@ -289,7 +295,7 @@ abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
-       *private_conf = NULL;
+       *private_conf = ERR_PTR(err);
        return err;
 }
 
@@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
                  "%s does not support generic reshape\n", __func__);
 
        rdev_for_each(rdev, mddev)
-               array_sectors += rdev->sectors;
+               array_sectors += (rdev->sectors &
+                                 ~(sector_t)(mddev->chunk_sectors-1));
 
        return array_sectors;
 }
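The masking added above rounds each member down to a whole number of chunks before it is counted toward the RAID0 size. A toy calculation with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long long sectors = 1000005;  /* hypothetical rdev->sectors */
	unsigned long long chunk   = 1024;     /* hypothetical chunk_sectors */

	/* Same masking as above: the trailing partial chunk is dropped. */
	printf("%llu\n", sectors & ~(chunk - 1));   /* prints 999424 */
	return 0;
}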
index d5bddfc..fd86b37 100644 (file)
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1303,8 @@ read_again:
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+               mbio->bi_rw =
+                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
                mbio->bi_private = r1_bio;
 
                atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
+       if (mddev->queue)
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
index 64d4824..77b562d 100644 (file)
  *    near_copies (stored in low byte of layout)
  *    far_copies (stored in second byte of layout)
  *    far_offset (stored in bit 16 of layout )
+ *    use_far_sets (stored in bit 17 of layout )
  *
- * The data to be stored is divided into chunks using chunksize.
- * Each device is divided into far_copies sections.
- * In each section, chunks are laid out in a style similar to raid0, but
- * near_copies copies of each chunk is stored (each on a different drive).
- * The starting device for each section is offset near_copies from the starting
- * device of the previous section.
- * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
- * drive.
- * near_copies and far_copies must be at least one, and their product is at most
- * raid_disks.
+ * The data to be stored is divided into chunks using chunksize.  Each device
+ * is divided into far_copies sections.   In each section, chunks are laid out
+ * in a style similar to raid0, but near_copies copies of each chunk is stored
+ * (each on a different drive).  The starting device for each section is offset
+ * near_copies from the starting device of the previous section.  Thus there
+ * are (near_copies * far_copies) of each chunk, and each is on a different
+ * drive.  near_copies and far_copies must be at least one, and their product
+ * is at most raid_disks.
  *
  * If far_offset is true, then the far_copies are handled a bit differently.
- * The copies are still in different stripes, but instead of be very far apart
- * on disk, there are adjacent stripes.
+ * The copies are still in different stripes, but instead of being very far
+ * apart on disk, there are adjacent stripes.
+ *
+ * The far and offset algorithms are handled slightly differently if
+ * 'use_far_sets' is true.  In this case, the array's devices are grouped into
+ * sets that are (near_copies * far_copies) in size.  The far copied stripes
+ * are still shifted by 'near_copies' devices, but this shifting stays confined
+ * to the set rather than the entire array.  This is done to improve the number
+ * of device combinations that can fail without causing the array to fail.
+ * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
+ * on a device):
+ *    A B C D    A B C D E
+ *      ...         ...
+ *    D A B C    E A B C D
+ * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
+ *    [A B] [C D]    [A B] [C D E]
+ *    |...| |...|    |...| | ... |
+ *    [B A] [D C]    [B A] [E C D]
  */
 
 /*
@@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
        sector_t stripe;
        int dev;
        int slot = 0;
+       int last_far_set_start, last_far_set_size;
+
+       last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+       last_far_set_start *= geo->far_set_size;
+
+       last_far_set_size = geo->far_set_size;
+       last_far_set_size += (geo->raid_disks % geo->far_set_size);
 
        /* now calculate first sector/dev */
        chunk = r10bio->sector >> geo->chunk_shift;
@@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
        /* and calculate all the others */
        for (n = 0; n < geo->near_copies; n++) {
                int d = dev;
+               int set;
                sector_t s = sector;
-               r10bio->devs[slot].addr = sector;
                r10bio->devs[slot].devnum = d;
+               r10bio->devs[slot].addr = s;
                slot++;
 
                for (f = 1; f < geo->far_copies; f++) {
+                       set = d / geo->far_set_size;
                        d += geo->near_copies;
-                       if (d >= geo->raid_disks)
-                               d -= geo->raid_disks;
+
+                       if ((geo->raid_disks % geo->far_set_size) &&
+                           (d > last_far_set_start)) {
+                               d -= last_far_set_start;
+                               d %= last_far_set_size;
+                               d += last_far_set_start;
+                       } else {
+                               d %= geo->far_set_size;
+                               d += geo->far_set_size * set;
+                       }
                        s += geo->stride;
                        r10bio->devs[slot].devnum = d;
                        r10bio->devs[slot].addr = s;
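For a single far copy, the set-confined wrap added above can be modelled in isolation. The helper below reproduces the arithmetic with plain parameters; it is a sketch for illustration, not the driver's __raid10_find_phys():

#include <stdio.h>

/* Device that holds the far copy of the chunk on device 'd' (sketch). */
static int far_copy_dev(int d, int near_copies, int raid_disks,
			int far_set_size)
{
	int set = d / far_set_size;
	int last_start = ((raid_disks / far_set_size) - 1) * far_set_size;
	int last_size  = far_set_size + raid_disks % far_set_size;

	d += near_copies;
	if ((raid_disks % far_set_size) && d > last_start) {
		d -= last_start;
		d %= last_size;
		d += last_start;
	} else {
		d %= far_set_size;
		d += far_set_size * set;
	}
	return d;
}

int main(void)
{
	int d;

	/* 4 devices in 2-device sets: copies stay inside [0,1] and [2,3]. */
	for (d = 0; d < 4; d++)
		printf("dev %d -> far copy on dev %d\n",
		       d, far_copy_dev(d, 1, 4, 2));
	return 0;
}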
@@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
         * or recovery, so reshape isn't happening
         */
        struct geom *geo = &conf->geo;
+       int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
+       int far_set_size = geo->far_set_size;
+       int last_far_set_start;
+
+       if (geo->raid_disks % geo->far_set_size) {
+               last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+               last_far_set_start *= geo->far_set_size;
+
+               if (dev >= last_far_set_start) {
+                       far_set_size = geo->far_set_size;
+                       far_set_size += (geo->raid_disks % geo->far_set_size);
+                       far_set_start = last_far_set_start;
+               }
+       }
 
        offset = sector & geo->chunk_mask;
        if (geo->far_offset) {
@@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
                chunk = sector >> geo->chunk_shift;
                fc = sector_div(chunk, geo->far_copies);
                dev -= fc * geo->near_copies;
-               if (dev < 0)
-                       dev += geo->raid_disks;
+               if (dev < far_set_start)
+                       dev += far_set_size;
        } else {
                while (sector >= geo->stride) {
                        sector -= geo->stride;
-                       if (dev < geo->near_copies)
-                               dev += geo->raid_disks - geo->near_copies;
+                       if (dev < (geo->near_copies + far_set_start))
+                               dev += far_set_size - geo->near_copies;
                        else
                                dev -= geo->near_copies;
                }
@@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
@@ -1460,7 +1508,8 @@ retry_write:
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1551,8 @@ retry_write:
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
                disks = mddev->raid_disks + mddev->delta_disks;
                break;
        }
-       if (layout >> 17)
+       if (layout >> 18)
                return -1;
        if (chunk < (PAGE_SIZE >> 9) ||
            !is_power_of_2(chunk))
@@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
        geo->near_copies = nc;
        geo->far_copies = fc;
        geo->far_offset = fo;
+       geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
        geo->chunk_mask = chunk - 1;
        geo->chunk_shift = ffz(~chunk);
        return nc*fc;
@@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
        if (mddev->queue) {
                blk_queue_max_discard_sectors(mddev->queue,
                                              mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
index 1054cf6..157d69e 100644 (file)
@@ -33,6 +33,11 @@ struct r10conf {
                                               * far_offset, in which case it is
                                               * 1 stripe.
                                               */
+               int             far_set_size; /* The number of devices in a set,
+                                              * where a 'set' are devices that
+                                              * contain far/offset copies of
+                                              * each other.
+                                              */
                int             chunk_shift; /* shift from chunks to sectors */
                sector_t        chunk_mask;
        } prev, geo;
index 5af2d27..3ee2912 100644 (file)
@@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
@@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void async_run_ops(void *param, async_cookie_t cookie)
-{
-       struct stripe_head *sh = param;
-       unsigned long ops_request = sh->ops.request;
-
-       clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
-       wake_up(&sh->ops.wait_for_ops);
-
-       __raid_run_ops(sh, ops_request);
-       release_stripe(sh);
-}
-
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
-{
-       /* since handle_stripe can be called outside of raid5d context
-        * we need to ensure sh->ops.request is de-staged before another
-        * request arrives
-        */
-       wait_event(sh->ops.wait_for_ops,
-                  !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
-       sh->ops.request = ops_request;
-
-       atomic_inc(&sh->count);
-       async_schedule(async_run_ops, sh);
-}
-#else
-#define raid_run_ops __raid_run_ops
-#endif
-
 static int grow_one_stripe(struct r5conf *conf)
 {
        struct stripe_head *sh;
@@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
                return 0;
 
        sh->raid_conf = conf;
-       #ifdef CONFIG_MULTICORE_RAID456
-       init_waitqueue_head(&sh->ops.wait_for_ops);
-       #endif
 
        spin_lock_init(&sh->stripe_lock);
 
@@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                        break;
 
                nsh->raid_conf = conf;
-               #ifdef CONFIG_MULTICORE_RAID456
-               init_waitqueue_head(&nsh->ops.wait_for_ops);
-               #endif
                spin_lock_init(&nsh->stripe_lock);
 
                list_add(&nsh->lru, &newstripes);
index 11d01d6..7bd068a 100644 (file)
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (bond->dev_addr_from_first)
+       if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
                bond_set_dev_addr(bond->dev, slave_dev);
 
        new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
index 639049d..da5f439 100644 (file)
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                        bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
                                  ring->start);
                } else {
+                       /* Omit CRC. */
+                       len -= ETH_FCS_LEN;
+
                        new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
                        if (new_skb) {
                                skb_put(new_skb, len);
                                skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
                                                                 new_skb->data,
                                                                 len);
+                               skb_checksum_none_assert(skb);
                                new_skb->protocol =
                                        eth_type_trans(new_skb, bgmac->net_dev);
                                netif_receive_skb(new_skb);
index ecac04a..a923bc4 100644 (file)
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
                tsum = ~csum_fold(csum_add((__force __wsum) csum,
                                  csum_partial(t_header, -fix, 0)));
 
-       return bswab16(csum);
+       return bswab16(tsum);
 }
 
 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
index 9a674b1..edfa67a 100644 (file)
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
                if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
                        cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+               if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+                       cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
        }
 
        cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                                ADVERTISED_10000baseKR_Full))
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+                       if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
                }
        } else { /* forced speed */
                /* advertise the requested speed and duplex if supported */
index 1663e0b..31c5787 100644 (file)
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x0);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Disable MI_INT interrupt before setting LED4
+                                * source to constant off.
+                                */
+                               if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                                          params->port*4) &
+                                   NIG_MASK_MI_INT) {
+                                       params->link_flags |=
+                                       LINK_FLAGS_INT_DISABLED;
+
+                                       bnx2x_bits_dis(
+                                               bp,
+                                               NIG_REG_MASK_INTERRUPT_PORT0 +
+                                               params->port*4,
+                                               NIG_MASK_MI_INT);
+                               }
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x0);
+                       }
                }
                break;
        case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x20);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Disable MI_INT interrupt before setting LED4
+                                * source to constant on.
+                                */
+                               if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                                          params->port*4) &
+                                   NIG_MASK_MI_INT) {
+                                       params->link_flags |=
+                                       LINK_FLAGS_INT_DISABLED;
+
+                                       bnx2x_bits_dis(
+                                               bp,
+                                               NIG_REG_MASK_INTERRUPT_PORT0 +
+                                               params->port*4,
+                                               NIG_MASK_MI_INT);
+                               }
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x20);
+                       }
                }
                break;
 
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LINK_SIGNAL,
                                         val);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Restore LED4 source to external link,
+                                * and re-enable interrupts.
+                                */
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x40);
+                               if (params->link_flags &
+                                   LINK_FLAGS_INT_DISABLED) {
+                                       bnx2x_link_int_enable(params);
+                                       params->link_flags &=
+                                               ~LINK_FLAGS_INT_DISABLED;
+                               }
+                       }
                }
                break;
        }
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        phy->media_type = ETH_PHY_KR;
                        phy->flags |= FLAGS_WC_DUAL_MODE;
                        phy->supported &= (SUPPORTED_20000baseKR2_Full |
+                                          SUPPORTED_10000baseT_Full |
+                                          SUPPORTED_1000baseT_Full |
                                           SUPPORTED_Autoneg |
                                           SUPPORTED_FIBRE |
                                           SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
                struct bnx2x_phy *phy = &params->phy[INT_PHY];
                bnx2x_set_aer_mmd(params, phy);
                if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
-                   (phy->speed_cap_mask & SPEED_20000))
+                   (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
                        bnx2x_check_kr2_wa(params, vars, phy);
                bnx2x_check_over_curr(params, vars);
                if (vars->rx_tx_asic_rst)
index d25c7d7..be5c195 100644 (file)
@@ -307,7 +307,8 @@ struct link_params {
        struct bnx2x *bp;
        u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
                                req_flow_ctrl is set to AUTO */
-       u16 rsrv1;
+       u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED                (1<<0)
        u32 lfa_base;
 };
 
index fccc3bf..069a155 100644 (file)
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct bufdesc *bdp;
        void *bufaddr;
        unsigned short  status;
-       unsigned long flags;
+       unsigned int index;
 
        if (!fep->link) {
                /* Link is down or autonegotiation is in progress. */
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock_irqsave(&fep->hw_lock, flags);
        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 * This should not happen, since ndev->tbusy should be set.
                 */
                printk("%s: tx queue full!.\n", ndev->name);
-               spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         * 4-byte boundaries. Use bounce buffers to copy data
         * and get it aligned. Ugh.
         */
+       if (fep->bufdesc_ex)
+               index = (struct bufdesc_ex *)bdp -
+                       (struct bufdesc_ex *)fep->tx_bd_base;
+       else
+               index = bdp - fep->tx_bd_base;
+
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-               unsigned int index;
-               if (fep->bufdesc_ex)
-                       index = (struct bufdesc_ex *)bdp -
-                               (struct bufdesc_ex *)fep->tx_bd_base;
-               else
-                       index = bdp - fep->tx_bd_base;
                memcpy(fep->tx_bounce[index], skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                swap_buffer(bufaddr, skb->len);
 
        /* Save skb pointer */
-       fep->tx_skbuff[fep->skb_cur] = skb;
-
-       ndev->stats.tx_bytes += skb->len;
-       fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+       fep->tx_skbuff[index] = skb;
 
        /* Push the data cache so the CPM does not get stale memory
         * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        ebdp->cbd_esc = BD_ENET_TX_INT;
                }
        }
-       /* Trigger transmission start */
-       writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
        /* If this was the last BD in the ring, start at the beginning again. */
        if (status & BD_ENET_TX_WRAP)
                bdp = fep->tx_bd_base;
        else
                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-       if (bdp == fep->dirty_tx) {
-               fep->tx_full = 1;
+       fep->cur_tx = bdp;
+
+       if (fep->cur_tx == fep->dirty_tx)
                netif_stop_queue(ndev);
-       }
 
-       fep->cur_tx = bdp;
+       /* Trigger transmission start */
+       writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
        skb_tx_timestamp(skb);
 
-       spin_unlock_irqrestore(&fep->hw_lock, flags);
-
        return NETDEV_TX_OK;
 }
 
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->cur_rx = fep->rx_bd_base;
 
-       /* Reset SKB transmit buffers. */
-       fep->skb_cur = fep->skb_dirty = 0;
        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
+       int     index = 0;
 
        fep = netdev_priv(ndev);
-       spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;
 
+       /* get next bdp of dirty_tx */
+       if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+               bdp = fep->tx_bd_base;
+       else
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-               if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+               /* current queue is empty */
+               if (bdp == fep->cur_tx)
                        break;
 
+               if (fep->bufdesc_ex)
+                       index = (struct bufdesc_ex *)bdp -
+                               (struct bufdesc_ex *)fep->tx_bd_base;
+               else
+                       index = bdp - fep->tx_bd_base;
+
                dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;
 
-               skb = fep->tx_skbuff[fep->skb_dirty];
+               skb = fep->tx_skbuff[index];
+
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-               fep->tx_skbuff[fep->skb_dirty] = NULL;
-               fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+               fep->tx_skbuff[index] = NULL;
+
+               fep->dirty_tx = bdp;
 
                /* Update pointer to next buffer descriptor to be transmitted */
                if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (fep->tx_full) {
-                       fep->tx_full = 0;
+               if (fep->dirty_tx != fep->cur_tx) {
                        if (netif_queue_stopped(ndev))
                                netif_wake_queue(ndev);
                }
        }
-       fep->dirty_tx = bdp;
-       spin_unlock(&fep->hw_lock);
+       return;
 }
 
 
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
                int_events = readl(fep->hwp + FEC_IEVENT);
                writel(int_events, fep->hwp + FEC_IEVENT);
 
-               if (int_events & FEC_ENET_RXF) {
+               if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
                        ret = IRQ_HANDLED;
 
                        /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
                        }
                }
 
-               /* Transmit OK, or non-fatal error. Update the buffer
-                * descriptors. FEC handles all errors, we just discover
-                * them as part of the transmit process.
-                */
-               if (int_events & FEC_ENET_TXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_tx(ndev);
-               }
-
                if (int_events & FEC_ENET_MII) {
                        ret = IRQ_HANDLED;
                        complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
        int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       fec_enet_tx(ndev);
+
        if (pkts < budget) {
                napi_complete(napi);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
 
        /* ...and the same for transmit */
        bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
        for (i = 0; i < TX_RING_SIZE; i++) {
 
                /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
        /* Set the last buffer to wrap */
        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
        bdp->cbd_sc |= BD_SC_WRAP;
+       fep->dirty_tx = bdp;
 
        fec_restart(ndev, 0);
 
index 01579b8..f539007 100644 (file)
@@ -97,6 +97,13 @@ struct bufdesc {
        unsigned short cbd_sc;  /* Control and status info */
        unsigned long cbd_bufaddr;      /* Buffer address */
 };
+#else
+struct bufdesc {
+       unsigned short  cbd_sc;                 /* Control and status info */
+       unsigned short  cbd_datlen;             /* Data length */
+       unsigned long   cbd_bufaddr;            /* Buffer address */
+};
+#endif
 
 struct bufdesc_ex {
        struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
        unsigned short res0[4];
 };
 
-#else
-struct bufdesc {
-       unsigned short  cbd_sc;                 /* Control and status info */
-       unsigned short  cbd_datlen;             /* Data length */
-       unsigned long   cbd_bufaddr;            /* Buffer address */
-};
-#endif
-
 /*
  *     The following definitions courtesy of commproc.h, which where
  *     Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct  sk_buff *tx_skbuff[TX_RING_SIZE];
        struct  sk_buff *rx_skbuff[RX_RING_SIZE];
-       ushort  skb_cur;
-       ushort  skb_dirty;
 
        /* CPM dual port RAM relative addresses */
        dma_addr_t      bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
        /* The ring entries to be free()ed */
        struct bufdesc  *dirty_tx;
 
-       uint    tx_full;
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;
 
index 8900398..28fb50a 100644 (file)
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 
-       rtl_tx_performance_tweak(pdev,
-               (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+       if (tp->dev->mtu <= ETH_DATA_LEN) {
+               rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+                                        PCI_EXP_DEVCTL_NOSNOOP_EN);
+       }
 }
 
 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        rtl_disable_clock_request(pdev);
 
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 
        rtl_csi_access_enable_1(tp);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
index bf57b3c..0bc0099 100644 (file)
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
                                                tx_queue->txd.entries);
        }
 
+       efx_device_detach_sync(efx);
        efx_stop_all(efx);
        efx_stop_interrupts(efx, true);
 
@@ -832,6 +833,7 @@ out:
 
        efx_start_interrupts(efx, true);
        efx_start_all(efx);
+       netif_device_attach(efx->net_dev);
        return rc;
 
 rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
        /* Flush efx_mac_work(), refill_workqueue, monitor_work */
        efx_flush_all(efx);
 
-       /* Stop the kernel transmit interface late, so the watchdog
-        * timer isn't ticking over the flush */
+       /* Stop the kernel transmit interface.  This is only valid if
+        * the device is stopped or detached; otherwise the watchdog
+        * may fire immediately.
+        */
+       WARN_ON(netif_running(efx->net_dev) &&
+               netif_device_present(efx->net_dev));
        netif_tx_disable(efx->net_dev);
 
        efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;
 
-       efx_stop_all(efx);
-
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
+       efx_device_detach_sync(efx);
+       efx_stop_all(efx);
+
        mutex_lock(&efx->mac_lock);
        net_dev->mtu = new_mtu;
        efx->type->reconfigure_mac(efx);
        mutex_unlock(&efx->mac_lock);
 
        efx_start_all(efx);
+       netif_device_attach(efx->net_dev);
        return 0;
 }
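
The efx_realloc_channels() and efx_change_mtu() hunks both bracket the datapath stop with a detach of the net device, so the netif_tx_disable() inside efx_stop_all() can no longer race the TX watchdog (the new WARN_ON documents that requirement). A minimal sketch of the underlying core-netdev pattern; example_reconfigure() is illustrative, and efx_device_detach_sync() is the driver's own synchronised wrapper around the detach step:

#include <linux/netdevice.h>

/* Detach, reconfigure, reattach.  While the device is marked absent the
 * core will neither hand packets to ndo_start_xmit() nor fire the TX
 * watchdog, so the queues can be disabled and the datapath rebuilt safely.
 */
static int example_reconfigure(struct net_device *ndev)
{
        int rc = 0;

        netif_device_detach(ndev);      /* sfc uses efx_device_detach_sync()  */
        netif_tx_disable(ndev);         /* now safe: device is not "present"  */

        /* ... stop the datapath, apply the new configuration, restart ... */

        netif_device_attach(ndev);      /* restarts queues if dev was running */
        return rc;
}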
 
index 2d756c1..0a90abd 100644 (file)
@@ -210,6 +210,7 @@ struct efx_tx_queue {
  *     Will be %NULL if the buffer slot is currently free.
 * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
  *     Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
                struct sk_buff *skb;
                struct page *page;
        } u;
-       unsigned int len;
+       u16 page_offset;
+       u16 len;
        u16 flags;
 };
 #define EFX_RX_BUF_PAGE                0x0001
index d780a0d..879ff58 100644 (file)
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                             struct efx_rx_buffer *buf)
 {
-       /* Offset is always within one page, so we don't need to consider
-        * the page order.
-        */
-       return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-               efx->type->rx_buffer_hash_size;
+       return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
+       unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                state->dma_addr = dma_addr;
 
                dma_addr += sizeof(struct efx_rx_page_state);
+               page_offset = sizeof(struct efx_rx_page_state);
 
        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
+               rx_buf->page_offset = page_offset;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
+                       page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                               struct efx_rx_buffer *rx_buf)
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int used_len)
 {
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       DMA_FROM_DEVICE);
+               } else if (used_len) {
+                       dma_sync_single_for_cpu(&efx->pci_dev->dev,
+                                               rx_buf->dma_addr, used_len,
+                                               DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
 {
-       efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+       efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                goto out;
        }
 
-       /* Release card resources - assumes all RX buffers consumed in-order
-        * per RX queue
+       /* Release and/or sync DMA mapping - assumes all RX buffers
+        * consumed in-order per RX queue
         */
-       efx_unmap_rx_buffer(efx, rx_buf);
+       efx_unmap_rx_buffer(efx, rx_buf, len);
 
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
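
The rx.c hunks above stop unmapping a shared RX page outright and instead sync only the bytes the device actually wrote when another buffer still references the page. A minimal sketch of the streaming-DMA rule being applied; example_rx_done() and its parameters are illustrative, and the driver in fact tracks separate page-level and per-buffer DMA addresses:

#include <linux/dma-mapping.h>

/* Before the CPU may look at data a device has DMA'd into a streaming
 * mapping, the mapping must either be unmapped or synced for the CPU.
 * When the page must stay mapped (it is shared with a second buffer),
 * syncing just the used length is the cheaper, still-correct option.
 */
static void example_rx_done(struct device *dev, dma_addr_t dma_addr,
                            size_t mapped_len, size_t used_len, bool last_ref)
{
        if (last_ref)
                dma_unmap_page(dev, dma_addr, mapped_len, DMA_FROM_DEVICE);
        else if (used_len)
                dma_sync_single_for_cpu(dev, dma_addr, used_len,
                                        DMA_FROM_DEVICE);
}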
index 7e93df6..01ffbc4 100644 (file)
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 
        writel(vlan, &priv->host_port_regs->port_vlan);
 
-       for (i = 0; i < 2; i++)
+       for (i = 0; i < priv->data.slaves; i++)
                slave_write(priv->slaves + i, vlan, reg);
 
        cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
index 2993444..abf7b61 100644 (file)
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
-       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
-                               | SUPPORTED_Asym_Pause),
+       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
index 9930f99..3657b4a 100644 (file)
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
 
 void phy_device_free(struct phy_device *phydev)
 {
-       kfree(phydev);
+       put_device(&phydev->dev);
 }
 EXPORT_SYMBOL(phy_device_free);
 
 static void phy_device_release(struct device *dev)
 {
-       phy_device_free(to_phy_device(dev));
+       kfree(to_phy_device(dev));
 }
 
 static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
           there's no driver _already_ loaded. */
        request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
 
+       device_initialize(&dev->dev);
+
        return dev;
 }
 EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
        /* Run all of the fixups for this PHY */
        phy_scan_fixups(phydev);
 
-       err = device_register(&phydev->dev);
+       err = device_add(&phydev->dev);
        if (err) {
-               pr_err("phy %d failed to register\n", phydev->addr);
+               pr_err("PHY %d failed to add\n", phydev->addr);
                goto out;
        }
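
The phy_device.c hunks switch the PHY from a one-shot device_register()/kfree() lifetime to the reference-counted driver-model lifecycle: device_initialize() at creation, device_add() at registration, and put_device() to drop the final reference, which is what now triggers the kfree() in the release callback. A minimal sketch of that pattern with a hypothetical object; struct foo and its helpers are not from the patch:

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
        struct device dev;
        /* ... */
};

static void foo_release(struct device *dev)
{
        /* Runs only once the last reference is gone. */
        kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;
        f->dev.release = foo_release;
        device_initialize(&f->dev);     /* refcounted from here on */
        dev_set_name(&f->dev, "foo0");
        return f;
}

static int foo_register(struct foo *f)
{
        return device_add(&f->dev);     /* second half of registration */
}

static void foo_free(struct foo *f)
{
        put_device(&f->dev);            /* never kfree() directly */
}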
 
index da92ed3..3b6e9b8 100644 (file)
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
          This driver creates an interface named "ethX", where X depends on
          what other networking devices you have in use.
 
+config USB_NET_AX88179_178A
+       tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
+       depends on USB_USBNET
+       select CRC32
+       select PHYLIB
+       default y
+       help
+         This option adds support for ASIX AX88179 based USB 3.0/2.0
+         to Gigabit Ethernet adapters.
+
+         This driver should work with at least the following devices:
+           * ASIX AX88179
+           * ASIX AX88178A
+           * Sitcomm LN-032
+
+         This driver creates an interface named "ethX", where X depends on
+         what other networking devices you have in use.
+
 config USB_NET_CDCETHER
        tristate "CDC Ethernet support (smart devices such as cable modems)"
        depends on USB_USBNET
index 4786913..119b06c 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150)       += rtl8150.o
 obj-$(CONFIG_USB_HSO)          += hso.o
 obj-$(CONFIG_USB_NET_AX8817X)  += asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
+obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
 obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
 obj-$(CONFIG_USB_NET_CDC_EEM)  += cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601)   += dm9601.o
index 2205dbc..7097534 100644 (file)
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
        .tx_fixup = asix_tx_fixup,
 };
 
+/*
+ * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
+ * no-name packaging.
+ * USB device strings are:
+ *   1: Manufacturer: USBLINK
+ *   2: Product: HG20F9 USB2.0
+ *   3: Serial: 000003
+ * Appears to be compatible with Asix 88772B.
+ */
+static const struct driver_info hg20f9_info = {
+       .description = "HG20F9 USB 2.0 Ethernet",
+       .bind = ax88772_bind,
+       .unbind = ax88772_unbind,
+       .status = asix_status,
+       .link_reset = ax88772_link_reset,
+       .reset = ax88772_reset,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+                FLAG_MULTI_PACKET,
+       .rx_fixup = asix_rx_fixup_common,
+       .tx_fixup = asix_tx_fixup,
+       .data = FLAG_EEPROM_MAC,
+};
+
 extern const struct driver_info ax88172a_info;
 
 static const struct usb_device_id      products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id        products [] = {
        /* ASIX 88172a demo board */
        USB_DEVICE(0x0b95, 0x172a),
        .driver_info = (unsigned long) &ax88172a_info,
+}, {
+       /*
+        * USBLINK HG20F9 "USB 2.0 LAN"
+        * Appears to have gazumped Linksys's manufacturer ID but
+        * doesn't (yet) conflict with any known Linksys product.
+        */
+       USB_DEVICE(0x066b, 0x20f9),
+       .driver_info = (unsigned long) &hg20f9_info,
 },
        { },            // END
 };
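
The asix_devices.c hunks add a new driver_info plus a matching usb_device_id entry; usbnet keys the match on VID/PID and carries the driver_info pointer in the id table's driver_info field, cast through unsigned long exactly as the entry above does. A minimal sketch of such a table; the IDs and the example_info/example_products names are placeholders, not a real device:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

static const struct driver_info example_info = {
        .description = "placeholder usbnet device",
        /* .bind, .rx_fixup, ... as required by the hardware */
};

static const struct usb_device_id example_products[] = {
        {
                USB_DEVICE(0x1234, 0x5678),     /* placeholder VID/PID */
                .driver_info = (unsigned long)&example_info,
        },
        { }                                     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_products);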
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
new file mode 100644 (file)
index 0000000..71c27d8
--- /dev/null
@@ -0,0 +1,1448 @@
+/*
+ * ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet Devices
+ *
+ * Copyright (C) 2011-2013 ASIX
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+
+#define AX88179_PHY_ID                         0x03
+#define AX_EEPROM_LEN                          0x100
+#define AX88179_EEPROM_MAGIC                   0x17900b95
+#define AX_MCAST_FLTSIZE                       8
+#define AX_MAX_MCAST                           64
+#define AX_INT_PPLS_LINK                       ((u32)BIT(16))
+#define AX_RXHDR_L4_TYPE_MASK                  0x1c
+#define AX_RXHDR_L4_TYPE_UDP                   4
+#define AX_RXHDR_L4_TYPE_TCP                   16
+#define AX_RXHDR_L3CSUM_ERR                    2
+#define AX_RXHDR_L4CSUM_ERR                    1
+#define AX_RXHDR_CRC_ERR                       ((u32)BIT(31))
+#define AX_RXHDR_DROP_ERR                      ((u32)BIT(30))
+#define AX_ACCESS_MAC                          0x01
+#define AX_ACCESS_PHY                          0x02
+#define AX_ACCESS_EEPROM                       0x04
+#define AX_ACCESS_EFUS                         0x05
+#define AX_PAUSE_WATERLVL_HIGH                 0x54
+#define AX_PAUSE_WATERLVL_LOW                  0x55
+
+#define PHYSICAL_LINK_STATUS                   0x02
+       #define AX_USB_SS               0x04
+       #define AX_USB_HS               0x02
+
+#define GENERAL_STATUS                         0x03
+/* Check AX88179 version. UA1