Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Apr 2011 01:32:57 +0000 (18:32 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Apr 2011 01:32:57 +0000 (18:32 -0700)
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon/kms: pll tweaks for r7xx
  drm/nouveau: fix allocation of notifier object
  drm/nouveau: fix notifier memory corruption bug
  drm/nouveau: fix pinning of notifier block
  drm/nouveau: populate ttm_alloced with false, when it's not
  drm/nouveau: fix nv30 pcie boards
  drm/nouveau: split ramin_lock into two locks, one hardirq safe
  drm/radeon/kms: adjust evergreen display watermark setup
  drm/radeon/kms: add connectors even if i2c fails
  drm/radeon/kms: fix bad shift in atom iio table parser

444 files changed:
Documentation/feature-removal-schedule.txt
Documentation/input/event-codes.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/alpha/kernel/Makefile
arch/alpha/kernel/core_mcpcia.c
arch/alpha/kernel/err_titan.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/smc37c93x.c
arch/alpha/kernel/sys_wildfire.c
arch/alpha/kernel/time.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/common/Makefile
arch/arm/include/asm/thread_notify.h
arch/arm/kernel/Makefile
arch/arm/kernel/elf.c
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/process.c
arch/arm/kernel/traps.c
arch/arm/mach-mmp/include/mach/gpio.h
arch/arm/mach-mmp/include/mach/mfp-pxa168.h
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-msm/timer.c
arch/arm/mach-pxa/include/mach/gpio.h
arch/arm/mach-pxa/include/mach/irqs.h
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-tegra/gpio.c
arch/arm/mach-tegra/tegra2_clocks.c
arch/arm/mm/mmap.c
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S
arch/arm/plat-s5p/pm.c
arch/arm/plat-samsung/pm-check.c
arch/arm/plat-samsung/pm.c
arch/arm/vfp/vfpmodule.c
arch/avr32/include/asm/setup.h
arch/avr32/kernel/setup.c
arch/avr32/kernel/traps.c
arch/avr32/mach-at32ap/clock.c
arch/avr32/mach-at32ap/extint.c
arch/avr32/mach-at32ap/pio.c
arch/avr32/mach-at32ap/pm-at32ap700x.S
arch/blackfin/include/asm/system.h
arch/blackfin/kernel/gptimers.c
arch/blackfin/kernel/time-ts.c
arch/blackfin/mach-common/smp.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/entry_mm.S
arch/m68k/kernel/syscalltable.S
arch/microblaze/Kconfig
arch/powerpc/Kconfig
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/pte-common.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_rio.c
arch/um/Kconfig.x86
arch/um/include/asm/bug.h [new file with mode: 0644]
arch/x86/include/asm/gart.h
arch/x86/include/asm/msr-index.h
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/smpboot.c
arch/x86/platform/ce4100/falconfalls.dts
arch/x86/platform/mrst/mrst.c
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-sysfs.c
block/blk.h
block/cfq-iosched.c
block/elevator.c
drivers/amba/bus.c
drivers/base/platform.c
drivers/base/power/main.c
drivers/connector/connector.c
drivers/dma/fsldma.c
drivers/gpio/ml_ioh_gpio.c
drivers/gpio/pca953x.c
drivers/gpio/pch_gpio.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/i2c-core.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/misc/xen-kbdfront.c
drivers/input/touchscreen/h3600_ts_input.c
drivers/leds/leds-regulator.c
drivers/md/dm-raid.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/video/videobuf-dma-contig.c
drivers/mfd/mfd-core.c
drivers/misc/sgi-gru/grufile.c
drivers/mtd/mtdswap.c
drivers/mtd/nand/atmel_nand.c
drivers/net/benet/be.h
drivers/net/benet/be_main.c
drivers/net/bna/bfa_ioc.c
drivers/net/bna/bfa_ioc.h
drivers/net/bna/bfa_ioc_ct.c
drivers/net/bna/bfi.h
drivers/net/bna/bnad.c
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/can/mcp251x.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/loopback.c
drivers/net/mlx4/en_rx.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mlx4.h
drivers/net/mlx4/sense.c
drivers/net/natsemi.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_main.c
drivers/net/pppoe.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_main.c
drivers/net/sfc/efx.c
drivers/net/sfc/io.h
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/selftest.c
drivers/net/sfc/tx.c
drivers/net/sis900.c
drivers/net/smsc911x.c
drivers/net/stmmac/dwmac_lib.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tokenring/3c359.c
drivers/net/tokenring/lanstreamer.c
drivers/net/tokenring/olympic.c
drivers/net/usb/smsc95xx.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/regd_common.h
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/dma.h
drivers/net/wireless/iwlegacy/Kconfig
drivers/net/wireless/iwlegacy/iwl-3945-hw.h
drivers/net/wireless/iwlegacy/iwl-4965-hw.h
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlegacy/iwl-eeprom.c
drivers/net/wireless/iwlegacy/iwl3945-base.c
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/mwl8k.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rtlwifi/efuse.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/wl12xx/sdio.c
drivers/net/wireless/wl12xx/spi.c
drivers/net/wireless/wl12xx/testmode.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/wireless/zd1211rw/zd_usb.h
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/pci-driver.c
drivers/pci/setup-bus.c
drivers/pcmcia/pxa2xx_balloon3.c
drivers/pcmcia/pxa2xx_trizeps4.c
drivers/platform/x86/Kconfig
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/eeepc-wmi.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/rapidio/rio.c
drivers/rapidio/switches/idt_gen2.c
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-mc13xxx.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-s3c.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/spi/amba-pl022.c
drivers/spi/dw_spi.c
drivers/spi/pxa2xx_spi.c
drivers/spi/spi_bfin5xx.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/samsung-laptop/Kconfig [deleted file]
drivers/staging/samsung-laptop/Makefile [deleted file]
drivers/staging/samsung-laptop/TODO [deleted file]
drivers/staging/samsung-laptop/samsung-laptop.c [deleted file]
drivers/usb/Kconfig
drivers/usb/core/devices.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/gadget/f_audio.c
drivers/usb/gadget/f_eem.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/pch_udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/host/ehci-q.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/ohci-au1xxx.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/Kconfig
drivers/usb/musb/blackfin.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musbhsdma.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/ux500.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/video/pxafb.c
drivers/xen/events.c
drivers/xen/manage.c
fs/9p/fid.c
fs/9p/v9fs.h
fs/9p/vfs_dentry.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/binfmt_elf.c
fs/btrfs/acl.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/xattr.c
fs/cifs/README
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_spnego.c
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/dcache.c
fs/ext4/ext4_jbd2.h
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fhandle.c
fs/filesystems.c
fs/gfs2/aops.c
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/ops_fstype.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/namei.c
fs/namespace.c
fs/nfs/write.c
fs/nfsd/lockd.c
fs/nfsd/nfs4state.c
fs/partitions/ldm.c
fs/proc/base.c
fs/ramfs/file-nommu.c
fs/ubifs/debug.h
fs/ubifs/file.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_message.c
fs/xfs/linux-2.6/xfs_message.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/quota/xfs_qm.c
fs/xfs/quota/xfs_qm.h
fs/xfs/quota/xfs_qm_syscalls.c
fs/xfs/xfs_alloc.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_priv.h
include/linux/blkdev.h
include/linux/can/platform/mcp251x.h
include/linux/device-mapper.h
include/linux/input.h
include/linux/input/mt.h
include/linux/memcontrol.h
include/linux/mfd/core.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/pid.h
include/linux/platform_device.h
include/linux/posix-clock.h
include/linux/rio.h
include/linux/rio_ids.h
include/linux/rtc.h
include/linux/sched.h
include/linux/suspend.h
include/linux/usb/usbnet.h
include/linux/vmstat.h
include/net/9p/9p.h
include/net/9p/client.h
include/net/ip_vs.h
include/net/mac80211.h
include/net/route.h
include/trace/events/block.h
kernel/futex.c
kernel/perf_event.c
kernel/pid.c
kernel/power/Kconfig
kernel/sched.c
kernel/sched_fair.c
kernel/time/posix-clock.c
kernel/trace/blktrace.c
lib/kstrtox.c
lib/test-kstrtox.c
mm/huge_memory.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
mm/oom_kill.c
mm/page_alloc.c
mm/shmem.c
mm/vmscan.c
mm/vmstat.c
net/9p/client.c
net/9p/protocol.c
net/9p/trans_common.c
net/9p/trans_virtio.c
net/bridge/br_netfilter.c
net/caif/cfdgml.c
net/caif/cfmuxl.c
net/ceph/osd_client.c
net/core/dev.c
net/dsa/mv88e6131.c
net/dsa/mv88e6xxx.h
net/ieee802154/Makefile
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_options.c
net/ipv4/netfilter.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/xfrm4_policy.c
net/ipv6/inet6_connection_sock.c
net/ipv6/netfilter.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/af_irda.c
net/llc/llc_input.c
net/mac80211/rx.c
net/netfilter/Kconfig
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_h323_asn1.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_conntrack.c
net/netfilter/xt_set.c
net/sctp/associola.c
sound/arm/pxa2xx-pcm-lib.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/pxa/pxa2xx-pcm.c
sound/soc/pxa/zylonite.c
sound/soc/soc-core.c
sound/usb/midi.c
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/util/cgroup.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/python.c
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c

index 274b32d12532a2e0e3814e70940b39721dde864a..492e81df296835e04ff6195304b3a32931e466c6 100644 (file)
@@ -387,26 +387,6 @@ Who:       Tejun Heo <tj@kernel.org>
 
 ----------------------------
 
-What:  Support for lcd_switch and display_get in asus-laptop driver
-When:  March 2010
-Why:   These two features use non-standard interfaces. There are the
-       only features that really need multiple path to guess what's
-       the right method name on a specific laptop.
-
-       Removing them will allow to remove a lot of code an significantly
-       clean the drivers.
-
-       This will affect the backlight code which won't be able to know
-       if the backlight is on or off. The platform display file will also be
-       write only (like the one in eeepc-laptop).
-
-       This should'nt affect a lot of user because they usually know
-       when their display is on or off.
-
-Who:   Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
 What:  sysfs-class-rfkill state file
 When:  Feb 2014
 Files: net/rfkill/core.c
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
new file mode 100644 (file)
index 0000000..23fcb05
--- /dev/null
@@ -0,0 +1,262 @@
+The input protocol uses a map of types and codes to express input device values
+to userspace. This document describes the types and codes and how and when they
+may be used.
+
+A single hardware event generates multiple input events. Each input event
+contains the new value of a single data item. A special event type, EV_SYN, is
+used to separate input events into packets of input data changes occurring at
+the same moment in time. In the following, the term "event" refers to a single
+input event encompassing a type, code, and value.
+
+The input protocol is a stateful protocol. Events are emitted only when values
+of event codes have changed. However, the state is maintained within the Linux
+input subsystem; drivers do not need to maintain the state and may attempt to
+emit unchanged values without harm. Userspace may obtain the current state of
+event code values using the EVIOCG* ioctls defined in linux/input.h. The event
+reports supported by a device are also provided by sysfs in
+class/input/event*/device/capabilities/, and the properties of a device are
+provided in class/input/event*/device/properties.
+
+Types:
+==========
+Types are groupings of codes under a logical input construct. Each type has a
+set of applicable codes to be used in generating events. See the Codes section
+for details on valid codes for each type.
+
+* EV_SYN:
+  - Used as markers to separate events. Events may be separated in time or in
+    space, such as with the multitouch protocol.
+
+* EV_KEY:
+  - Used to describe state changes of keyboards, buttons, or other key-like
+    devices.
+
+* EV_REL:
+  - Used to describe relative axis value changes, e.g. moving the mouse 5 units
+    to the left.
+
+* EV_ABS:
+  - Used to describe absolute axis value changes, e.g. describing the
+    coordinates of a touch on a touchscreen.
+
+* EV_MSC:
+  - Used to describe miscellaneous input data that do not fit into other types.
+
+* EV_SW:
+  - Used to describe binary state input switches.
+
+* EV_LED:
+  - Used to turn LEDs on devices on and off.
+
+* EV_SND:
+  - Used to output sound to devices.
+
+* EV_REP:
+  - Used for autorepeating devices.
+
+* EV_FF:
+  - Used to send force feedback commands to an input device.
+
+* EV_PWR:
+  - A special type for power button and switch input.
+
+* EV_FF_STATUS:
+  - Used to receive force feedback device status.
+
+Codes:
+==========
+Codes define the precise type of event.
+
+EV_SYN:
+----------
+EV_SYN event values are undefined. Their usage is defined only by when they are
+sent in the evdev event stream.
+
+* SYN_REPORT:
+  - Used to synchronize and separate events into packets of input data changes
+    occurring at the same moment in time. For example, motion of a mouse may set
+    the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
+    motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
+
+* SYN_CONFIG:
+  - TBD
+
+* SYN_MT_REPORT:
+  - Used to synchronize and separate touch events. See the
+    multi-touch-protocol.txt document for more information.
+
+* SYN_DROPPED:
+  - Used to indicate buffer overrun in the evdev client's event queue.
+    Client should ignore all events up to and including next SYN_REPORT
+    event and query the device (using EVIOCG* ioctls) to obtain its
+    current state.
+
+EV_KEY:
+----------
+EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
+to represent the 'A' key on a keyboard. When a key is depressed, an event with
+the key's code is emitted with value 1. When the key is released, an event is
+emitted with value 0. Some hardware send events when a key is repeated. These
+events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
+BTN_<name> is used for other types of momentary switch events.
+
+A few EV_KEY codes have special meanings:
+
+* BTN_TOOL_<name>:
+  - These codes are used in conjunction with input trackpads, tablets, and
+    touchscreens. These devices may be used with fingers, pens, or other tools.
+    When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
+    code should be set to a value of 1. When the tool is no longer interacting
+    with the input device, the BTN_TOOL_<name> code should be reset to 0. All
+    trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
+    code when events are generated.
+
+* BTN_TOUCH:
+    BTN_TOUCH is used for touch contact. While an input tool is determined to be
+    within meaningful physical contact, the value of this property must be set
+    to 1. Meaningful physical contact may mean any contact, or it may mean
+    contact conditioned by an implementation defined property. For example, a
+    touchpad may set the value to 1 only when the touch pressure rises above a
+    certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
+    example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
+    pen is hovering over but not touching the tablet surface.
+
+Note: For appropriate function of the legacy mousedev emulation driver,
+BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
+
+Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
+interpreted as a touchpad by userspace, while a similar device without
+BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
+with current userspace it is recommended to follow this distinction. In the
+future, this distinction will be deprecated and the device properties ioctl
+EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
+
+* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
+  - These codes denote one, two, three, and four finger interaction on a
+    trackpad or touchscreen. For example, if the user uses two fingers and moves
+    them on the touchpad in an effort to scroll content on screen,
+    BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
+    Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
+    purpose. A trackpad event generated by finger touches should generate events
+    for one code from each group. At most only one of these BTN_TOOL_<name>
+    codes should have a value of 1 during any synchronization frame.
+
+Note: Historically some drivers emitted multiple of the finger count codes with
+a value of 1 in the same synchronization frame. This usage is deprecated.
+
+Note: In multitouch drivers, the input_mt_report_finger_count() function should
+be used to emit these codes. Please see multi-touch-protocol.txt for details.
+
+EV_REL:
+----------
+EV_REL events describe relative changes in a property. For example, a mouse may
+move to the left by a certain number of units, but its absolute position in
+space is unknown. If the absolute position is known, EV_ABS codes should be used
+instead of EV_REL codes.
+
+A few EV_REL codes have special meanings:
+
+* REL_WHEEL, REL_HWHEEL:
+  - These codes are used for vertical and horizontal scroll wheels,
+    respectively.
+
+EV_ABS:
+----------
+EV_ABS events describe absolute changes in a property. For example, a touchpad
+may emit coordinates for a touch location.
+
+A few EV_ABS codes have special meanings:
+
+* ABS_DISTANCE:
+  - Used to describe the distance of a tool from an interaction surface. This
+    event should only be emitted while the tool is hovering, meaning in close
+    proximity of the device and while the value of the BTN_TOUCH code is 0. If
+    the input device may be used freely in three dimensions, consider ABS_Z
+    instead.
+
+* ABS_MT_<name>:
+  - Used to describe multitouch input events. Please see
+    multi-touch-protocol.txt for details.
+
+EV_SW:
+----------
+EV_SW events describe stateful binary switches. For example, the SW_LID code is
+used to denote when a laptop lid is closed.
+
+Upon binding to a device or resuming from suspend, a driver must report
+the current switch state. This ensures that the device, kernel, and userspace
+state is in sync.
+
+Upon resume, if the switch state is the same as before suspend, then the input
+subsystem will filter out the duplicate switch state reports. The driver does
+not need to keep the state of the switch at any time.
+
+EV_MSC:
+----------
+EV_MSC events are used for input and output events that do not fall under other
+categories.
+
+EV_LED:
+----------
+EV_LED events are used for input and output to set and query the state of
+various LEDs on devices.
+
+EV_REP:
+----------
+EV_REP events are used for specifying autorepeating events.
+
+EV_SND:
+----------
+EV_SND events are used for sending sound commands to simple sound output
+devices.
+
+EV_FF:
+----------
+EV_FF events are used to initialize a force feedback capable device and to cause
+such device to feedback.
+
+EV_PWR:
+----------
+EV_PWR events are a special type of event used specifically for power
+management. Its usage is not well defined. To be addressed later.
+
+Guidelines:
+==========
+The guidelines below ensure proper single-touch and multi-finger functionality.
+For multi-touch functionality, see the multi-touch-protocol.txt document for
+more information.
+
+Mice:
+----------
+REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
+the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
+further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
+scroll wheel events where available.
+
+Touchscreens:
+----------
+ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
+used to report when a touch is active on the screen.
+BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
+contact. BTN_TOOL_<name> events should be reported where possible.
+
+Trackpads:
+----------
+Legacy trackpads that only provide relative position information must report
+events like mice described above.
+
+Trackpads that provide absolute touch position must report ABS_{X,Y} for the
+location of the touch. BTN_TOUCH should be used to report when a touch is active
+on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
+be used to report the number of touches active on the trackpad.
+
+Tablets:
+----------
+BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
+the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
+should be used to report when the tool is in contact with the tablet.
+BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
+button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
+BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
+meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
+purpose on the device.
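
The new document above describes the protocol from the kernel's side. As a rough illustration of how a userspace client might consume it -- including the SYN_DROPPED recovery the document calls for -- a minimal evdev reader could look like the sketch below. The device path, the absence of error handling and the choice of EVIOCGKEY for resynchronisation are assumptions made for illustration; this code is not part of the patch.

/* Minimal evdev reader sketch: each read() returns whole input_event
 * structs; SYN_REPORT closes a packet, SYN_DROPPED means the client's
 * queue overran and device state must be re-read via EVIOCG* ioctls. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	unsigned char keys[KEY_MAX / 8 + 1];
	int dropped = 0;
	int fd = open("/dev/input/event0", O_RDONLY);	/* device node is an assumption */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
			dropped = 1;	/* events until the next SYN_REPORT are unreliable */
			continue;
		}
		if (ev.type == EV_SYN && ev.code == SYN_REPORT) {
			if (dropped) {
				/* resynchronise: query the current key state, then resume */
				memset(keys, 0, sizeof(keys));
				ioctl(fd, EVIOCGKEY(sizeof(keys)), keys);
				dropped = 0;
			}
			continue;
		}
		if (!dropped)
			printf("type %u code %u value %d\n", ev.type, ev.code, ev.value);
	}
	close(fd);
	return 0;
}
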
index 6b4b9cdec370fef4f83ed5449329dc8339347f71..1e2724e55cf014319b16bcbde917dabefbed23f7 100644 (file)
@@ -151,6 +151,7 @@ S:  Maintained
 F:     drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -184,10 +185,9 @@ F: Documentation/filesystems/9p.txt
 F:     fs/9p/
 
 A2232 SERIAL BOARD DRIVER
-M:     Enver Haase <A2232@gmx.net>
 L:     linux-m68k@lists.linux-m68k.org
-S:     Maintained
-F:     drivers/char/ser_a2232*
+S:     Orphan
+F:     drivers/staging/generic_serial/ser_a2232*
 
 AACRAID SCSI RAID DRIVER
 M:     Adaptec OEM Raid Solutions <aacraid@adaptec.com>
@@ -877,6 +877,13 @@ F: arch/arm/mach-mv78xx0/
 F:     arch/arm/mach-orion5x/
 F:     arch/arm/plat-orion/
 
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M:     Alexander Clouter <alex@digriz.org.uk>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W:     http://www.digriz.org.uk/ts78xx/kernel
+S:     Maintained
+F:     arch/arm/mach-orion5x/ts78xx-*
+
 ARM/MIOA701 MACHINE SUPPORT
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1063,7 +1070,7 @@ F:        arch/arm/mach-shmobile/
 F:     drivers/sh/
 
 ARM/TELECHIPS ARM ARCHITECTURE
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/plat-tcc/
@@ -1823,11 +1830,10 @@ S:      Maintained
 F:     drivers/platform/x86/compal-laptop.c
 
 COMPUTONE INTELLIPORT MULTIPORT CARD
-M:     "Michael H. Warfield" <mhw@wittsend.com>
 W:     http://www.wittsend.com/computone.html
-S:     Maintained
+S:     Orphan
 F:     Documentation/serial/computone.txt
-F:     drivers/char/ip2/
+F:     drivers/staging/tty/ip2/
 
 CONEXANT ACCESSRUNNER USB DRIVER
 M:     Simon Arlott <cxacru@fire.lp0.eu>
@@ -2010,7 +2016,7 @@ F:        drivers/net/wan/cycx*
 CYCLADES ASYNC MUX DRIVER
 W:     http://www.cyclades.com/
 S:     Orphan
-F:     drivers/char/cyclades.c
+F:     drivers/tty/cyclades.c
 F:     include/linux/cyclades.h
 
 CYCLADES PC300 DRIVER
@@ -2124,8 +2130,8 @@ L:        Eng.Linux@digi.com
 W:     http://www.digi.com
 S:     Orphan
 F:     Documentation/serial/digiepca.txt
-F:     drivers/char/epca*
-F:     drivers/char/digi*
+F:     drivers/staging/tty/epca*
+F:     drivers/staging/tty/digi*
 
 DIOLAN U2C-12 I2C DRIVER
 M:     Guenter Roeck <guenter.roeck@ericsson.com>
@@ -4077,7 +4083,7 @@ F:        drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/max6650
@@ -4192,7 +4198,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:     Jiri Slaby <jirislaby@gmail.com>
 S:     Maintained
 F:     Documentation/serial/moxa-smartio
-F:     drivers/char/mxser.*
+F:     drivers/tty/mxser.*
 
 MSI LAPTOP SUPPORT
 M:     "Lee, Chun-Yi" <jlee@novell.com>
@@ -4234,7 +4240,7 @@ F:        sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
 S:     Orphan
-F:     drivers/char/isicom.c
+F:     drivers/tty/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
@@ -5273,14 +5279,14 @@ F:      drivers/memstick/host/r592.*
 RISCOM8 DRIVER
 S:     Orphan
 F:     Documentation/serial/riscom8.txt
-F:     drivers/char/riscom8*
+F:     drivers/staging/tty/riscom8*
 
 ROCKETPORT DRIVER
 P:     Comtrol Corp.
 W:     http://www.comtrol.com
 S:     Maintained
 F:     Documentation/serial/rocket.txt
-F:     drivers/char/rocket*
+F:     drivers/tty/rocket*
 
 ROSE NETWORK LAYER
 M:     Ralf Baechle <ralf@linux-mips.org>
@@ -5916,10 +5922,9 @@ F:       arch/arm/mach-spear6xx/spear600.c
 F:     arch/arm/mach-spear6xx/spear600_evb.c
 
 SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-M:     Roger Wolff <R.E.Wolff@BitWizard.nl>
-S:     Supported
+S:     Orphan
 F:     Documentation/serial/specialix.txt
-F:     drivers/char/specialix*
+F:     drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
 M:     David Brownell <dbrownell@users.sourceforge.net>
@@ -5964,7 +5969,6 @@ F:        arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M:     Greg Kroah-Hartman <greg@kroah.com>
-M:     Chris Wright <chrisw@sous-sol.org>
 L:     stable@kernel.org
 S:     Maintained
 
@@ -6248,7 +6252,8 @@ M:        Greg Ungerer <gerg@uclinux.org>
 W:     http://www.uclinux.org/
 L:     uclinux-dev@uclinux.org  (subscribers-only)
 S:     Maintained
-F:     arch/m68knommu/
+F:     arch/m68k/*/*_no.*
+F:     arch/m68k/include/asm/*_no.*
 
 UCLINUX FOR RENESAS H8/300 (H8300)
 M:     Yoshinori Sato <ysato@users.sourceforge.jp>
@@ -6618,7 +6623,7 @@ F:        fs/hostfs/
 F:     fs/hppfs/
 
 USERSPACE I/O (UIO)
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 M:     Greg Kroah-Hartman <gregkh@suse.de>
 S:     Maintained
 F:     Documentation/DocBook/uio-howto.tmpl
@@ -6916,6 +6921,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:     Maintained
 F:     drivers/platform/x86
 
+XEN NETWORK BACKEND DRIVER
+M:     Ian Campbell <ian.campbell@citrix.com>
+L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/xen-netback/*
+
 XEN PCI SUBSYSTEM
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
index 8392b64079df3c0ef838913e656cfe0ff1a1ae69..b967b967572bb1c3bca2ab1b046add090b0f9248 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index 9bb7b858ed239ef4e590732a75fe250bbf934eaf..7a6d908bb865c2e83a5cc51dcd975830f8cdfc4a 100644 (file)
@@ -4,7 +4,7 @@
 
 extra-y                := head.o vmlinux.lds
 asflags-y      := $(KBUILD_CFLAGS)
-ccflags-y      := -Werror -Wno-sign-compare
+ccflags-y      := -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
            irq_alpha.o signal.o setup.o ptrace.o time.o \
index 381fec0af52e35a6262586059ccdacbcfd863148..da7bcc372f16c50f6cc3f048b6a6956df748aec4 100644 (file)
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1,
 {
        unsigned long flags;
        unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-       unsigned int stat0, value, temp, cpu;
+       unsigned int stat0, value, cpu;
 
        cpu = smp_processor_id();
 
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1,
        stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
        *(vuip)MCPCIA_CAP_ERR(mid) = stat0;
        mb();
-       temp = *(vuip)MCPCIA_CAP_ERR(mid);
+       *(vuip)MCPCIA_CAP_ERR(mid);
        DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
        mb();
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 {
        unsigned long flags;
        unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-       unsigned int stat0, temp, cpu;
+       unsigned int stat0, cpu;
 
        cpu = smp_processor_id();
 
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
        /* Reset status register to avoid losing errors.  */
        stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
        *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
-       temp = *(vuip)MCPCIA_CAP_ERR(mid);
+       *(vuip)MCPCIA_CAP_ERR(mid);
        DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
        draina();
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
        *((vuip)addr) = value;
        mb();
        mb();  /* magic */
-       temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
+       *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
        mcheck_expected(cpu) = 0;
        mb();
 
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr)
 void
 mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
 {
-       struct el_common *mchk_header;
        struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
        unsigned int cpu = smp_processor_id();
        int expected;
 
-       mchk_header = (struct el_common *)la_ptr;
        mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
        expected = mcheck_expected(cpu);
 
index c3b3781a03de01045ebd270319173ae2b5d347ee..14b26c466c8996cc1e4f7e03ff4391bcbb2bc3c8 100644 (file)
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = {
 static struct el_subpacket *
 el_process_regatta_subpacket(struct el_subpacket *header)
 {
-       int status;
-
        if (header->class != EL_CLASS__REGATTA_FAMILY) {
                printk("%s  ** Unexpected header CLASS %d TYPE %d, aborting\n",
                       err_print_prefix,
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header)
                printk("%s  ** Occurred on CPU %d:\n", 
                       err_print_prefix,
                       (int)header->by_type.regatta_frame.cpuid);
-               status = privateer_process_logout_frame((struct el_common *)
+               privateer_process_logout_frame((struct el_common *)
                        header->by_type.regatta_frame.data_start, 1);
                break;
        default:
index 1479dc6ebd97a4288ee6d0dd0334ea17dbf02049..51b7fbd9e4c11406ce8950f6cecc64f1ebd7556a 100644 (file)
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-       irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+       irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
                                      handle_simple_irq, "RTC");
        setup_irq(RTC_IRQ, &timer_irqaction);
 }
index d2634e4476b4589f03831f575ec00751b0434ca9..edbddcbd5bc654692fbfc98572ed8277303d49db 100644 (file)
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type)
        case PCA56_CPU:
        case PCA57_CPU:
          {
-               unsigned long cbox_config, size;
-
                if (cpu_type == PCA56_CPU) {
                        L1I = CSHAPE(16*1024, 6, 1);
                        L1D = CSHAPE(8*1024, 5, 1);
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type)
                }
                L3 = -1;
 
+#if 0
+               unsigned long cbox_config, size;
+
                cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
                size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
 
-#if 0
                L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
 #else
                L2 = external_cache_probe(512*1024, 6);
index 3e6a2893af9f6e87122ca0e872ec95282cdf3fde..6886b834f4871f54c6eb6fd7a232a20ca12d0d0b 100644 (file)
@@ -79,7 +79,6 @@
 static unsigned long __init SMCConfigState(unsigned long baseAddr)
 {
        unsigned char devId;
-       unsigned char devRev;
 
        unsigned long configPort;
        unsigned long indexPort;
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr)
                devId = inb(dataPort);
                if (devId == VALID_DEVICE_ID) {
                        outb(DEVICE_REV, indexPort);
-                       devRev = inb(dataPort);
+                       /* unsigned char devRev = */ inb(dataPort);
                        break;
                }
                else
index d3cb28bb8eb0e450fcd4298698b944264d7b1fc0..d92cdc715c6530150563259e6fbc4c859f3e1d9f 100644 (file)
@@ -156,7 +156,6 @@ static void __init
 wildfire_init_irq_per_pca(int qbbno, int pcano)
 {
        int i, irq_bias;
-       unsigned long io_bias;
        static struct irqaction isa_enable = {
                .handler        = no_action,
                .name           = "isa_enable",
@@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
        irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
                 + pcano * WILDFIRE_IRQ_PER_PCA;
 
+#if 0
+       unsigned long io_bias;
+
        /* Only need the following for first PCI bus per PCA. */
        io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
 
-#if 0
        outb(0, DMA1_RESET_REG + io_bias);
        outb(0, DMA2_RESET_REG + io_bias);
        outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
index a58e84f1a63b76cc3e96856d10736a1c03ba6872..918e8e0b72ff1e3220845ea410277a83ff326517 100644 (file)
@@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts)
                year += 100;
 
        ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+       ts->tv_nsec = 0;
 }
 
 
index fdc9d4dbf85b528fb70614f5ff1a199504a3df75..377a7a595b08041fdacb1dfd43d8be206578bda8 100644 (file)
@@ -1540,7 +1540,6 @@ config HIGHMEM
 config HIGHPTE
        bool "Allocate 2nd-level pagetables from highmem"
        depends on HIGHMEM
-       depends on !OUTER_CACHE
 
 config HW_PERF_EVENTS
        bool "Enable hardware performance counter support for perf events"
@@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
        depends on !ARCH_S5P64X0 && !ARCH_S5P6442
+       depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+               CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
        def_bool y
 
 endmenu
index 494224a9b459060f94f39aafeeae8389111d50a0..03d01d783e3bf95c72f28259aad1550b97c5cc68 100644 (file)
@@ -63,17 +63,6 @@ config DEBUG_USER
              8 - SIGSEGV faults
             16 - SIGBUS faults
 
-config DEBUG_ERRORS
-       bool "Verbose kernel error messages"
-       depends on DEBUG_KERNEL
-       help
-         This option controls verbose debugging information which can be
-         printed when the kernel detects an internal error. This debugging
-         information is useful to kernel hackers when tracking down problems,
-         but mostly meaningless to other people. It's safe to say Y unless
-         you are concerned with the code size or don't want to see these
-         messages.
-
 config DEBUG_STACK_USAGE
        bool "Enable stack utilization instrumentation"
        depends on DEBUG_KERNEL
index e7521bca2c3564eaf3d21663fbf4ae3721e43461..6ea9b6f3607af35121e2e117e1580b1d841b520d 100644 (file)
@@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP)     += scoop.o
 obj-$(CONFIG_ARCH_IXP2000)     += uengine.o
 obj-$(CONFIG_ARCH_IXP23XX)     += uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
-obj-$(CONFIG_COMMON_CLKDEV)    += clkdev.o
 obj-$(CONFIG_ARM_TIMER_SP804)  += timer-sp.o
index c4391ba203507a2b1c34a9634ad6fb9c4ff7eb64..1dc98067589412a8ecc42e72e5b21a5f161b4f2a 100644 (file)
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
 #define THREAD_NOTIFY_FLUSH    0
 #define THREAD_NOTIFY_EXIT     1
 #define THREAD_NOTIFY_SWITCH   2
+#define THREAD_NOTIFY_COPY     3
 
 #endif
 #endif
index 74554f1742d72efd2c01a639e5fd0e3460221f94..8d95446150a3e7ed0ad8fd5782f7f918529bc3d7 100644 (file)
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES)         += armksyms.o module.o
 obj-$(CONFIG_ARTHUR)           += arthur.o
 obj-$(CONFIG_ISA_DMA)          += dma-isa.o
 obj-$(CONFIG_PCI)              += bios32.o isa.o
-obj-$(CONFIG_PM)               += sleep.o
+obj-$(CONFIG_PM_SLEEP)         += sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
 obj-$(CONFIG_SMP)              += smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)     += smp_scu.o
index d4a0da1e48f40988bb7f92ce34664d86adb74a32..9b05c6a0dceac54615e1dfa472bf28ca946616ac 100644 (file)
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
 void elf_set_personality(const struct elf32_hdr *x)
 {
        unsigned int eflags = x->e_flags;
-       unsigned int personality = PER_LINUX_32BIT;
+       unsigned int personality = current->personality & ~PER_MASK;
+
+       /*
+        * We only support Linux ELF executables, so always set the
+        * personality to LINUX.
+        */
+       personality |= PER_LINUX;
 
        /*
         * APCS-26 is only valid for OABI executables
         */
-       if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
-               if (eflags & EF_ARM_APCS_26)
-                       personality = PER_LINUX;
-       }
+       if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+           (eflags & EF_ARM_APCS_26))
+               personality &= ~ADDR_LIMIT_32BIT;
+       else
+               personality |= ADDR_LIMIT_32BIT;
 
        set_personality(personality);
 
index 8dbc126f7152d992472898a639801239d49a40ac..87acc25d7a3e203646f2ee71f1d7c711304d52f2 100644 (file)
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
                 */
                asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
                isb();
+
+               /*
+                * Clear any configured vector-catch events before
+                * enabling monitor mode.
+                */
+               asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+               isb();
        }
 
        if (enable_monitor_mode())
index 69cfee0fe00f1cbc38eb7f789ef23aae51688dd6..979da3947f4272c933739064d576f991c412eeac 100644 (file)
@@ -221,7 +221,7 @@ again:
        prev_raw_count &= armpmu->max_period;
 
        if (overflow)
-               delta = armpmu->max_period - prev_raw_count + new_raw_count;
+               delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
        else
                delta = new_raw_count - prev_raw_count;
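
The extra "+ 1" above accounts for the increment that wraps the counter from max_period back to zero, which is itself one event. A worked example with a hypothetical 8-bit counter (not the real ARM PMU width) makes the arithmetic concrete; the helper below is an illustration only, not kernel code.

/* Illustration only: event delta across a counter overflow, mirroring
 * the corrected computation above with a hypothetical 8-bit counter. */
#include <stdio.h>

static unsigned int delta(unsigned int max_period, unsigned int prev_count,
			  unsigned int new_count, int overflow)
{
	/* prev -> max_period is (max_period - prev_count) increments, the
	 * wrap to 0 is one more, and 0 -> new_count is new_count increments. */
	if (overflow)
		return max_period - prev_count + new_count + 1;
	return new_count - prev_count;
}

int main(void)
{
	/* prev = 250, the counter wraps, new = 5: 5 + 1 + 5 = 11 events. */
	printf("%u\n", delta(255, 250, 5, 1));
	return 0;
}
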
 
index 94bbedbed6394cf147e2b73bce3b6629d0dbbd9f..5e1e541972277f38a123d7964fca081629584d50 100644 (file)
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
        if (clone_flags & CLONE_SETTLS)
                thread->tp_value = regs->ARM_r3;
 
+       thread_notify(THREAD_NOTIFY_COPY, thread);
+
        return 0;
 }
 
index f0000e188c8c8c5a14be6f21c37b961739ecfad6..3b54ad19d4890c40752905af84a345b21ba56b62 100644 (file)
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
        struct thread_info *thread = current_thread_info();
        siginfo_t info;
 
-       if (current->personality != PER_LINUX &&
-           current->personality != PER_LINUX_32BIT &&
+       if ((current->personality & PER_MASK) != PER_LINUX &&
            thread->exec_domain->handler) {
                thread->exec_domain->handler(n, regs);
                return regs->ARM_r0;
index ee8b02ed8011dfa615603045a22c5a98956635e4..7bfb827f3fe36a8d71fc874b1a45e446be1d3bac 100644 (file)
@@ -10,7 +10,7 @@
 #define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
 #define GPIO_REG(x)    (*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
 
-#define NR_BUILTIN_GPIO        (192)
+#define NR_BUILTIN_GPIO                IRQ_GPIO_NUM
 
 #define gpio_to_bank(gpio)     ((gpio) >> 5)
 #define gpio_to_irq(gpio)      (IRQ_GPIO_START + (gpio))
index 4621067c7720f457b311faff80e19dddc87ecd3e..713be155a44d716df91aeb74c2d043630f543564 100644 (file)
@@ -8,6 +8,15 @@
 #define MFP_DRIVE_MEDIUM       (0x2 << 13)
 #define MFP_DRIVE_FAST         (0x3 << 13)
 
+#undef MFP_CFG
+#undef MFP_CFG_DRV
+
+#define MFP_CFG(pin, af)               \
+       (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM)
+
+#define MFP_CFG_DRV(pin, af, drv)      \
+       (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv)
+
 /* GPIO */
 #define GPIO0_GPIO             MFP_CFG(GPIO0, AF5)
 #define GPIO1_GPIO             MFP_CFG(GPIO1, AF5)
index 7f568611547e94d5343db7d174712dc732ea0f2c..6a96911b0ad508cdde70e79359a8e3480ce10c16 100644 (file)
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
 
 static void __init qsd8x50_init_mmc(void)
 {
-       if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
-               vreg_mmc = vreg_get(NULL, "gp6");
-       else
-               vreg_mmc = vreg_get(NULL, "gp5");
+       vreg_mmc = vreg_get(NULL, "gp5");
 
        if (IS_ERR(vreg_mmc)) {
                pr_err("vreg get for vreg_mmc failed (%ld)\n",
index 56f920c55b6aaaaa45737aeeddffadfa1b1a12d2..38b95e949d13b0a87d83a3274b0369e98c8321ed 100644 (file)
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
        /* Use existing clock_event for cpu 0 */
        if (!smp_processor_id())
-               return;
+               return 0;
 
        writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
index b024a8b374394ac2731596ba39bba462309f04a6..c4639502efcac4d236f5752172bfe2f29f7af27f 100644 (file)
 #define GAFR(x)                GPIO_REG(0x54 + (((x) & 0x70) >> 2))
 
 
-#define NR_BUILTIN_GPIO 128
+#define NR_BUILTIN_GPIO                PXA_GPIO_IRQ_NUM
 
 #define gpio_to_bank(gpio)     ((gpio) >> 5)
 #define gpio_to_irq(gpio)      IRQ_GPIO(gpio)
-#define irq_to_gpio(irq)       IRQ_TO_GPIO(irq)
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+       int gpio;
+
+       if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
+               return irq - IRQ_GPIO0;
+
+       gpio = irq - PXA_GPIO_IRQ_BASE;
+       if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
+               return gpio;
+
+       return -1;
+}
 
 #ifdef CONFIG_CPU_PXA26x
 /* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
index a4285fc0087841ad0c48902a2ae5d664f1205745..038402404e3932e78c702181c6d62cd4e4d145a7 100644 (file)
@@ -93,9 +93,6 @@
 #define GPIO_2_x_TO_IRQ(x)     (PXA_GPIO_IRQ_BASE + (x))
 #define IRQ_GPIO(x)    (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
 
-#define IRQ_TO_GPIO_2_x(i)     ((i) - PXA_GPIO_IRQ_BASE)
-#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
-
 /*
  * The following interrupts are for board specific purposes. Since
  * the kernel can only run on one machine at a time, we can re-use
index 6bde5956358d88ceef7c23641bb92c77e3a44154..a4af8c52d7ee297b49246be5246aa0e055e1c970 100644 (file)
@@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {}
 
 static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
 {
-       int gpio = IRQ_TO_GPIO(d->irq);
+       int gpio = irq_to_gpio(d->irq);
        uint32_t mask = 0;
 
        if (gpio >= 0 && gpio < 85)
index 1cb5d0f9723fb28ea9c5c1dabe441646c3dfcf25..909756eaf4b72a3b32a36f444517d1748604767a 100644 (file)
@@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {}
  */
 static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
 {
-       int gpio = IRQ_TO_GPIO(d->irq);
+       int gpio = irq_to_gpio(d->irq);
        uint32_t mask;
 
        if (gpio >= 0 && gpio < 128)
index 76a3f654220f2f012b842b9e2794811fba2a4710..65a1aba6823db6be97b9b87115be7caab4e8f2f9 100644 (file)
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 void tegra_gpio_resume(void)
 {
        unsigned long flags;
-       int b, p, i;
+       int b;
+       int p;
 
        local_irq_save(flags);
 
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
 void tegra_gpio_suspend(void)
 {
        unsigned long flags;
-       int b, p, i;
+       int b;
+       int p;
 
        local_irq_save(flags);
        for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
index 6d7c4eea4dcbcd95a567635667d2ccf557878808..4459470c052de3f97ea29ec138c0a2006787328b 100644 (file)
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
 {
        unsigned long flags;
        int ret;
+       long new_rate = rate;
 
-       rate = clk_round_rate(c->parent, rate);
-       if (rate < 0)
-               return rate;
+       new_rate = clk_round_rate(c->parent, new_rate);
+       if (new_rate < 0)
+               return new_rate;
 
        spin_lock_irqsave(&c->parent->spinlock, flags);
 
-       c->u.shared_bus_user.rate = rate;
+       c->u.shared_bus_user.rate = new_rate;
        ret = tegra_clk_shared_bus_update(c->parent);
 
        spin_unlock_irqrestore(&c->parent->spinlock, flags);
index afe209e1e1f85fb299e9822ea07a49b13cc29ecf..74be05f3e03ac58be921aff208c6f6aa460ab683 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <asm/cputype.h>
 #include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                mm->cached_hole_size = 0;
        }
        /* 8 bits of randomness in 20 address space bits */
-       if (current->flags & PF_RANDOMIZE)
+       if ((current->flags & PF_RANDOMIZE) &&
+           !(current->personality & ADDR_NO_RANDOMIZE))
                addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
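
Spelling out the comment in the hunk above: the added offset is (get_random_int() % 256) pages, so with ARM's 4 KiB pages (PAGE_SHIFT = 12) the mmap base moves by one of 256 page-aligned values between 0 and 255 * 4096 = 0xff000 bytes -- 8 bits of entropy inside a 1 MiB (20-bit) window. The new personality check only skips this offset when ADDR_NO_RANDOMIZE is set; the randomisation itself is unchanged.
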
index b46eb21f05c7edf8d53c474a54791ea8ea8a922b..bf8a1d1cccb6c33b26412264a2f05228657e49aa 100644 (file)
@@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
 .equ   cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index 6a4bdb2c94a7ba0f856bf06c00f95049ab638dc1..0ed85d930c095e3280f7f029973e1530a7fcd4aa 100644 (file)
@@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
 .equ   cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index 74483d1977fe788ac2b4b8d42e32952da9e7ed0e..184a9c997e36616dcc6819418d380f9d9e439291 100644 (file)
@@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl cpu_sa1100_suspend_size
 .equ   cpu_sa1100_suspend_size, 4*4
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_sa1100_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
        mrc     p15, 0, r4, c3, c0, 0           @ domain ID
index bfa0c9f611c537c85db433da446f791e2f8ba851..7c99cb4c8e4fb8663158caf9832d6df6e9e27ef4 100644 (file)
@@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl cpu_v6_suspend_size
 .equ   cpu_v6_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v6_do_suspend)
        stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
index c35618e42f6fcbd7808f36474518aed226dba7e1..babfba09c89ff390deb7ddfd0496e813f442e047 100644 (file)
@@ -211,7 +211,7 @@ cpu_v7_name:
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
 .equ   cpu_v7_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
        stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
index 63d8b2044e84114c4401c7851e14b659a39821f2..596213699f37ac8a52d580ae70df8303bc344c94 100644 (file)
@@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl cpu_xsc3_suspend_size
 .equ   cpu_xsc3_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
        stmfd   sp!, {r4 - r10, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index 086038cd86abc1e32ffb0c6552d8fdc85c5a3bd2..ce233bcbf5060c14bfe89c0093020a9c17005904 100644 (file)
@@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl cpu_xscale_suspend_size
 .equ   cpu_xscale_suspend_size, 4 * 7
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xscale_do_suspend)
        stmfd   sp!, {r4 - r10, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index d592b6304b48cf5825b22fe0298d8a2db21b4d52..d15dc47b0e3db50c2c87535aca5f5ff153e4c6b0 100644 (file)
 
 #define PFX "s5p pm: "
 
-/* s3c_pm_check_resume_pin
- *
- * check to see if the pin is configured correctly for sleep mode, and
- * make any necessary adjustments if it is not
-*/
-
-static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
-{
-       /* nothing here yet */
-}
-
 /* s3c_pm_configure_extint
  *
  * configure all external interrupt pins
index e4baf76f374ada34260e37dc88916207ad75abf2..6b733fafe7cda41815d88115fa866d1a6976a71b 100644 (file)
@@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
  */
 static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
 {
-       void *save_at = phys_to_virt(s3c_sleep_save_phys);
        unsigned long addr;
        unsigned long left;
        void *stkpage;
@@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
                        goto skip_check;
                }
 
-               if (in_region(ptr, left, save_at, 32*4 )) {
-                       S3C_PMDBG("skipping %08lx, has save block in\n", addr);
-                       goto skip_check;
-               }
-
                /* calculate and check the checksum */
 
                calc = crc32_le(~0, ptr, left);
index d5b58d31903c740d2a0328a67e3d536adcc832d8..5c0a440d6e16741f34f547b7aa6583a16997b5c4 100644 (file)
@@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
  *
  * print any IRQs asserted at resume time (ie, we woke from)
 */
-static void s3c_pm_show_resume_irqs(int start, unsigned long which,
-                                   unsigned long mask)
+static void __maybe_unused s3c_pm_show_resume_irqs(int start,
+                                                  unsigned long which,
+                                                  unsigned long mask)
 {
        int i;
 
index bbf3da012afdf6ab1233f7b6e0bf9eaa7c96194a..f74695075e641ec72562e1c8194a8220327e8bc8 100644 (file)
@@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread)
        put_cpu();
 }
 
+static void vfp_thread_copy(struct thread_info *thread)
+{
+       struct thread_info *parent = current_thread_info();
+
+       vfp_sync_hwstate(parent);
+       thread->vfpstate = parent->vfpstate;
+}
+
 /*
  * When this function is called with the following 'cmd's, the following
  * is true while this function is being run:
@@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread)
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
        struct thread_info *thread = v;
+       u32 fpexc;
+#ifdef CONFIG_SMP
+       unsigned int cpu;
+#endif
 
-       if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
-               u32 fpexc = fmrx(FPEXC);
+       switch (cmd) {
+       case THREAD_NOTIFY_SWITCH:
+               fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
-               unsigned int cpu = thread->cpu;
+               cpu = thread->cpu;
 
                /*
                 * On SMP, if VFP is enabled, save the old state in
@@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
-               return NOTIFY_DONE;
-       }
+               break;
 
-       if (cmd == THREAD_NOTIFY_FLUSH)
+       case THREAD_NOTIFY_FLUSH:
                vfp_thread_flush(thread);
-       else
+               break;
+
+       case THREAD_NOTIFY_EXIT:
                vfp_thread_exit(thread);
+               break;
+
+       case THREAD_NOTIFY_COPY:
+               vfp_thread_copy(thread);
+               break;
+       }
 
        return NOTIFY_DONE;
 }
index ff5b7cf6be4dd8065bffb566065de245a3f4020e..160543dbec7eb1c9725c6edf3ba9259e5fd9bd9d 100644 (file)
@@ -94,6 +94,13 @@ struct tag_ethernet {
 
 #define ETH_INVALID_PHY        0xff
 
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+       u32     board_number;
+};
+
 struct tag {
        struct tag_header hdr;
        union {
@@ -102,6 +109,7 @@ struct tag {
                struct tag_cmdline cmdline;
                struct tag_clock clock;
                struct tag_ethernet ethernet;
+               struct tag_boardinfo boardinfo;
        } u;
 };
 
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
 
 extern resource_size_t fbmem_start;
 extern resource_size_t fbmem_size;
+extern u32 board_number;
 
 void setup_processor(void);
 
index 5c7083916c33c14792728da5a08295476daebc17..bb0974cce4accf3c0a0d01cdfce08d7533d86ab7 100644 (file)
@@ -390,6 +390,21 @@ static int __init parse_tag_clock(struct tag *tag)
 }
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
+/*
+ * The board_number corresponds to bd->bi_board_number in U-Boot. This
+ * parameter is only available during initialisation and can be used for
+ * board identification.
+ */
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+       board_number = tag->u.boardinfo.board_number;
+
+       return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
 /*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
index b91b2044af9c9660210bb9efd5f11ab60a08dcbc..7aa25756412f16299b69cccfcd1d1c9851aaaec7 100644 (file)
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
        info.si_code = code;
        info.si_addr = (void __user *)addr;
        force_sig_info(signr, &info, current);
-
-       /*
-        * Init gets no signals that it doesn't have a handler for.
-        * That's all very well, but if it has caused a synchronous
-        * exception and we ignore the resulting signal, it will just
-        * generate the same exception over and over again and we get
-        * nowhere.  Better to kill it and let the kernel panic.
-        */
-       if (is_global_init(current)) {
-               __sighandler_t handler;
-
-               spin_lock_irq(&current->sighand->siglock);
-               handler = current->sighand->action[signr-1].sa.sa_handler;
-               spin_unlock_irq(&current->sighand->siglock);
-               if (handler == SIG_DFL) {
-                       /* init has generated a synchronous exception
-                          and it doesn't have a handler for the signal */
-                       printk(KERN_CRIT "init has generated signal %ld "
-                              "but has no handler for it\n", signr);
-                       do_exit(signr);
-               }
-       }
 }
 
 asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
index 442f08c5e6415f198df5c7843b69d144fa336ed6..86925fd6ea5bb3daf34eb039858ff69f2bb02ec6 100644 (file)
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
        spin_unlock(&clk_list_lock);
 }
 
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
 {
        struct clk *clk;
 
-       spin_lock(&clk_list_lock);
-
        list_for_each_entry(clk, &at32_clock_list, list) {
                if (clk->dev == dev && strcmp(id, clk->name) == 0) {
-                       spin_unlock(&clk_list_lock);
                        return clk;
                }
        }
 
-       spin_unlock(&clk_list_lock);
        return ERR_PTR(-ENOENT);
 }
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+       struct clk *clk;
+
+       spin_lock(&clk_list_lock);
+       clk = __clk_get(dev, id);
+       spin_unlock(&clk_list_lock);
+
+       return clk;
+}
+
 EXPORT_SYMBOL(clk_get);
 
 void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
        spin_lock(&clk_list_lock);
 
        /* show clock tree as derived from the three oscillators */
-       clk = clk_get(NULL, "osc32k");
+       clk = __clk_get(NULL, "osc32k");
        dump_clock(clk, &r);
        clk_put(clk);
 
-       clk = clk_get(NULL, "osc0");
+       clk = __clk_get(NULL, "osc0");
        dump_clock(clk, &r);
        clk_put(clk);
 
-       clk = clk_get(NULL, "osc1");
+       clk = __clk_get(NULL, "osc1");
        dump_clock(clk, &r);
        clk_put(clk);
 
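The hunks above split clk_get() into a locked wrapper around an unlocked __clk_get() helper, so that clk_show(), which already holds clk_list_lock, can look up clocks without deadlocking on its own lock. A minimal userspace sketch of the same pattern follows; the pthread mutex and the static clock table stand in for the kernel spinlock and clock list, and every name and value in it is illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Toy model of the __clk_get()/clk_get() split: the unlocked helper is
 * for callers that already hold the list lock, the public wrapper takes
 * the lock itself. */
static pthread_mutex_t clk_list_lock = PTHREAD_MUTEX_INITIALIZER;

struct clk { const char *name; };

static struct clk clocks[] = { { "osc32k" }, { "osc0" }, { "osc1" } };

/* Caller must hold clk_list_lock. */
static struct clk *__clk_get(const char *id)
{
	for (size_t i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		if (strcmp(clocks[i].name, id) == 0)
			return &clocks[i];
	return NULL;
}

static struct clk *clk_get(const char *id)
{
	struct clk *clk;

	pthread_mutex_lock(&clk_list_lock);
	clk = __clk_get(id);
	pthread_mutex_unlock(&clk_list_lock);
	return clk;
}

int main(void)
{
	printf("%s\n", clk_get("osc0") ? "found osc0" : "missing osc0");
	return 0;
}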
index 47ba4b9b6db16b5852a0f857236e3d73e5349e75..fbc2aeaebddbde6023bb4853814f2e598436a1bd 100644 (file)
@@ -61,34 +61,34 @@ struct eic {
 static struct eic *nmi_eic;
 static bool nmi_enabled;
 
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
        eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
 }
 
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq;
        unsigned int i = irq - eic->first_irq;
        u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int_irq = platform_get_irq(pdev, 0);
-       if (!regs || !int_irq) {
+       if (!regs || (int)int_irq <= 0) {
                dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
                return -ENXIO;
        }
index f308e1ddc629310ef882c04361b92ae4cef51622..2e0aa853a4bcecec88339d30b22d112114edc5be 100644 (file)
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
        pio_writel(pio, IDR, 1 << (gpio & 0x1f));
 }
 
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
 {
        unsigned                gpio = irq_to_gpio(d->irq);
        struct pio_device       *pio = &pio_dev[gpio >> 5];
index 17503b0ed6c9b3f5865406fb0d5f44241ce6bab7..f868f4ce761ba6f37a7b63617fba356410263ff1 100644 (file)
@@ -53,7 +53,7 @@ cpu_enter_idle:
        st.w    r8[TI_flags], r9
        unmask_interrupts
        sleep   CPU_SLEEP_IDLE
-       .size   cpu_idle_sleep, . - cpu_idle_sleep
+       .size   cpu_enter_idle, . - cpu_enter_idle
 
        /*
         * Common return path for PM functions that don't run from
index 19e2c7c3e63ac41bf62f2e55a4db10d6bf97a13f..44bd0cced7256dc1a04eb78242b8ad5fb8ca882c 100644 (file)
  * Force strict CPU ordering.
  */
 #define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb()   __asm__ __volatile__ (""   : : : "memory")
-#define rmb()  __asm__ __volatile__ (""   : : : "memory")
-#define wmb()  __asm__ __volatile__ (""   : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends()                 do { } while(0)
+#define smp_mb()  mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends()     read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
                                        unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb()      do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb()     do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb()     do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends()     do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb()  do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends()        do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb()      barrier()
-# define smp_rmb()     barrier()
-# define smp_wmb()     barrier()
-#define smp_read_barrier_depends()     barrier()
+# define mb()  barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends()        do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
+#define mb()   barrier()
+#define rmb()  barrier()
+#define wmb()  barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
        unsigned long a[100];
index cdbe075de1dc6841411310ef481ed26a4210c2f2..8b81dc04488aa6864aeb23236cadb65cc1cda676 100644 (file)
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
        _disable_gptimers(mask);
        for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
                if (mask & (1 << i))
-                       group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+                       group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
        SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
index 8c9a43daf80fad1f4ecaec62f25ea0778cc2be94..cdb4beb6bc8fc9507bf0531317b8b834eca9d4c8 100644 (file)
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = dev_id;
        smp_mb();
-       evt->event_handler(evt);
+       /*
+        * We want to ACK before we handle so that we can handle smaller timer
+        * intervals.  This way, if the timer expires again while we're handling
+        * things, we're more likely to see that 2nd interrupt rather than
+        * swallowing it by ACKing the interrupt at the end of this handler.
+        */
        bfin_gptmr0_ack();
+       evt->event_handler(evt);
        return IRQ_HANDLED;
 }
 
index 6e17a265c4d3158761450463b3c5a66e63a39bab..8bce5ed031e448ed0a51d9e77c947864eed2e2bf 100644 (file)
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
        struct blackfin_flush_data *fdata = info;
 
        /* Invalidate the memory holding the bounds of the flushed region. */
-       invalidate_dcache_range((unsigned long)fdata,
-               (unsigned long)fdata + sizeof(*fdata));
+       blackfin_dcache_invalidate_range((unsigned long)fdata,
+                                        (unsigned long)fdata + sizeof(*fdata));
+
+       /* Make sure all write buffers in the data side of the core
+        * are flushed before trying to invalidate the icache.  This
+        * needs to be after the data flush and before the icache
+        * flush so that the SSYNC does the right thing in preventing
+        * the instruction prefetcher from hitting things in cached
+        * memory at the wrong time -- it runs much further ahead than
+        * the pipeline.
+        */
+       SSYNC();
 
-       flush_icache_range(fdata->start, fdata->end);
+       /* ipi_flush_icache is invoked by generic flush_icache_range,
+        * so call blackfin arch icache flush directly here.
+        */
+       blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
index 26d851d385bb190a81f271f49fe89c74778e3c06..29e17907d9f2c2af079df6e5b8e0b032de792562 100644 (file)
 #define __NR_fanotify_init     337
 #define __NR_fanotify_mark     338
 #define __NR_prlimit64         339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime     342
+#define __NR_syncfs            343
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            340
+#define NR_syscalls            344
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 1559dea36e5581f62bf2d8b4b1fe6c6da37308f4..1359ee659574135d440deea4495728c1cbb872d2 100644 (file)
@@ -750,4 +750,8 @@ sys_call_table:
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64
+       .long sys_name_to_handle_at     /* 340 */
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
+       .long sys_syncfs
 
index 79b1ed198c070dd40dbb3cf4f03dd6f24b79cb71..9b8393d8adb89dae4ada43f66c6d63c8670589f6 100644 (file)
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64
+       .long sys_name_to_handle_at     /* 340 */
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
+       .long sys_syncfs
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index 851b3bf6e962eb3b6e95d5459645f5dfc4ea80b6..eccdefe70d4e4ba1b4874df00228faff583e948b 100644 (file)
@@ -6,7 +6,6 @@ config MICROBLAZE
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
-       select USB_ARCH_HAS_EHCI
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select HAVE_OPROFILE
        select HAVE_ARCH_KGDB
index b6ff882f695bbac53e95b2f6b1a1a00a9bf97ac6..8f4d50b0adfa9ce21bf8dd68158df455ef242af3 100644 (file)
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
        depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-                  PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+                  (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
        bool
index be3cdf9134ce8727b9a7787250fb6ba3dfdb8e44..1833d1a07e797eab6c9f174585b42e456d19f559 100644 (file)
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
            CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
            CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC        (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
            CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+           CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+           CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32    (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_COMPATIBLE    (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE      (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE      \
            (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |        \
            CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |       \
            CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T |           \
            CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
        CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
 #endif
 #ifdef CONFIG_E500
            CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+           CPU_FTRS_E5500 |
 #endif
            0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS                (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS                \
            (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &        \
            CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 &       \
            CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
        CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
 #endif
 #ifdef CONFIG_E500
            CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+           CPU_FTRS_E5500 &
 #endif
            CPU_FTRS_POSSIBLE,
 };
index 811f04ac3660842baf72973b3786c3d8f9b92c6c..8d1569c290428e82db7d03cec9bca66c66dcf35b 100644 (file)
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * on platforms where such control is possible.
  */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-       defined(CONFIG_KPROBES)
+       defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT       PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT       PAGE_KERNEL_ROX
index c9b68d07ac4fca3cdd8996e8a43d850034efb407..b9602ee06deb11ee4c80c8eeeb63a2757f587841 100644 (file)
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80240000,
                .cpu_name               = "e5500",
-               .cpu_features           = CPU_FTRS_E500MC,
+               .cpu_features           = CPU_FTRS_E5500,
                .cpu_user_features      = COMMON_USER_BOOKE,
                .mmu_features           = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
                        MMU_FTR_USE_TLBILX,
index 3d3d416339dd1294ae31380ce0c8474c6eaa3b52..5b5e1f002a8ea2ef2053bb16ba20e1e110edd348 100644 (file)
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
        unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
        }
        mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
        crash_ipi_callback(regs);
 }
 
-#else
+#else  /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
        /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
        cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
index c00d4ca1ee157fc995629825b95af4b8b232a5bb..28581f1ad2c08a8494502e2335cfeef2a295577e 100644 (file)
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int ibmebus_bus_pm_freeze(struct device *dev)
 {
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define ibmebus_bus_pm_freeze          NULL
 #define ibmebus_bus_pm_thaw            NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 #define ibmebus_bus_pm_poweroff_noirq  NULL
 #define ibmebus_bus_pm_restore_noirq   NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
        .prepare = ibmebus_bus_pm_prepare,
index c834757bebc07663ba44f2c779f210a4f308637d..2b97b80d6d7d65911f951276eec81029cda748e6 100644 (file)
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
                if (!parent)
                        continue;
                if (of_match_node(legacy_serial_parents, parent) != NULL) {
-                       index = add_legacy_soc_port(np, np);
-                       if (index >= 0 && np == stdout)
-                               legacy_serial_console = index;
+                       if (of_device_is_available(np)) {
+                               index = add_legacy_soc_port(np, np);
+                               if (index >= 0 && np == stdout)
+                                       legacy_serial_console = index;
+                       }
                }
                of_node_put(parent);
        }
index c4063b7f49a0e5536ac441834a755cbe5840a7dd..822f63008ae11642b570986c7faf8ca61478ae10 100644 (file)
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
        return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+       u64 delta = (val - prev) & 0xfffffffful;
+
+       /*
+        * POWER7 can roll back counter values; if the new value is smaller
+        * than the previous value it will cause the delta and the counter to
+        * have bogus values unless we rolled a counter over.  If a counter is
+        * rolled back, it will be smaller, but within 256, which is the maximum
+        * number of events to roll back at once.  If we detect a rollback,
+        * return 0.  This can lead to a small lack of precision in the
+        * counters.
+        */
+       if (prev > val && (prev - val) < 256)
+               delta = 0;
+
+       return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
+               delta = check_and_compute_delta(prev, val);
+               if (!delta)
+                       return;
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-       /* The counters are only 32 bits wide */
-       delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
                prev = local64_read(&event->hw.prev_count);
                event->hw.idx = 0;
-               delta = (val - prev) & 0xfffffffful;
-               local64_add(delta, &event->count);
+               delta = check_and_compute_delta(prev, val);
+               if (delta)
+                       local64_add(delta, &event->count);
        }
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                                  unsigned long pmc5, unsigned long pmc6)
 {
        struct perf_event *event;
-       u64 val;
+       u64 val, prev;
        int i;
 
        for (i = 0; i < cpuhw->n_limited; ++i) {
                event = cpuhw->limited_counter[i];
                event->hw.idx = cpuhw->limited_hwidx[i];
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
-               local64_set(&event->hw.prev_count, val);
+               prev = local64_read(&event->hw.prev_count);
+               if (check_and_compute_delta(prev, val))
+                       local64_set(&event->hw.prev_count, val);
                perf_event_update_userpage(event);
        }
 }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
-       delta = (val - prev) & 0xfffffffful;
+       delta = check_and_compute_delta(prev, val);
        local64_add(delta, &event->count);
 
        /*
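The check_and_compute_delta() logic added above can be exercised outside the kernel. The sketch below is a standalone C copy of that computation with two made-up samples, one showing a normal 32-bit counter wrap and one showing a POWER7-style rollback being suppressed; the sample values are purely illustrative and not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the delta computation for a 32-bit PMC that may
 * wrap forward or (on POWER7) roll back by up to 256 events. */
static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
	uint64_t delta = (val - prev) & 0xfffffffful;	/* 32-bit wrap-safe */

	/* A rollback shows up as val < prev by less than 256; treat it as 0. */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

int main(void)
{
	/* Normal wrap: counter went from 0xfffffff0 past zero to 0x10 -> 32. */
	printf("wrap    : %llu\n",
	       (unsigned long long)check_and_compute_delta(0xfffffff0ul, 0x10ul));
	/* Rollback: counter stepped back by 5, so the delta is suppressed. */
	printf("rollback: %llu\n",
	       (unsigned long long)check_and_compute_delta(0x1000ul, 0x0ffbul));
	return 0;
}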
index 375480c56eb9fd6699c90e385dd1877affb3d052..f33acfd872ad31ef73df610b07a507d2e6bb78a1 100644 (file)
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
        u64 stolen = 0;
        u64 dtb;
 
+       if (!dtl)
+               return 0;
+
        if (i == vpa->dtl_idx)
                return 0;
        while (i < vpa->dtl_idx) {
index a830c5e806577b4c3f443e6617b3964dbf8ae62f..bc5f0dc6ae1e7686f7dd7a10823025d0e138fa13 100644 (file)
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
        mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
        extern void g5_phy_disable_cpu1(void);
 
        /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
                set_cpu_present(1, false);
                g5_phy_disable_cpu1();
        }
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
        register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
        if (ppc_md.progress)
                ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
        .message_pass   = smp_mpic_message_pass,
        .probe          = smp_core99_probe,
+#ifdef CONFIG_PPC64
        .bringup_done   = smp_core99_bringup_done,
+#endif
        .kick_cpu       = smp_core99_kick_cpu,
        .setup_cpu      = smp_core99_setup_cpu,
        .give_timebase  = smp_core99_give_timebase,
index 000724149089c032c45e375b30a0128e449d6e41..6c42cfde8415f31dacd0a981cee09535da581667 100644 (file)
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
        int cpu, ret;
        struct paca_struct *pp;
        struct dtl_entry *dtl;
+       struct kmem_cache *dtl_cache;
 
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return 0;
 
+       dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+                                               DISPATCH_LOG_BYTES, 0, NULL);
+       if (!dtl_cache) {
+               pr_warn("Failed to create dispatch trace log buffer cache\n");
+               pr_warn("Stolen time statistics will be unreliable\n");
+               return 0;
+       }
+
        for_each_possible_cpu(cpu) {
                pp = &paca[cpu];
-               dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
-                                  cpu_to_node(cpu));
+               dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
                if (!dtl) {
                        pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
                                cpu);
index f8f7f28c6343682bc9b00ac7f4a754fe1d00cc47..68ca9290df9451886e74d24fa4834c15780979f5 100644 (file)
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
        struct resource rsrc;
        const int *bus_range;
 
+       if (!of_device_is_available(dev)) {
+               pr_warning("%s: disabled\n", dev->full_name);
+               return -ENODEV;
+       }
+
        pr_debug("Adding PCI host bridge %s\n", dev->full_name);
 
        /* Fetch host bridge registers address */
index 14232d57369c4de4f373f76e095ec95bb559345e..49798532b477b8630c77a203dfb76c62ad3caf46 100644 (file)
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev)
        port->ops = ops;
        port->priv = priv;
        port->phys_efptr = 0x100;
-       rio_register_mport(port);
 
        priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
        rio_regs_win = priv->regs_win;
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev)
        dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
                        port->sys_size ? 65536 : 256);
 
+       if (rio_register_mport(port))
+               goto err;
+
        if (port->host_deviceid >= 0)
                out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
                        RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
index 02fb017fed472ed96d66aa1a4252b17b2f5bd677..a9da516a527416e12cfdc887939e2eb8cbd5020f 100644 (file)
@@ -4,6 +4,10 @@ menu "UML-specific options"
 
 menu "Host processor type and features"
 
+config CMPXCHG_LOCAL
+       bool
+       default n
+
 source "arch/x86/Kconfig.cpu"
 
 endmenu
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h
new file mode 100644 (file)
index 0000000..9e33b86
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __UM_BUG_H
+#define __UM_BUG_H
+
+#include <asm-generic/bug.h>
+
+#endif
index 43085bfc99c30f963b929a9c7afdcff932ad9236..156cd5d18d2abeabb26a0587cfffd15c52aa6f13 100644 (file)
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
         * Don't enable translation but enable GART IO and CPU accesses.
         * Also, set DISTLBWALKPRB since GART tables memory is UC.
         */
-       ctl = DISTLBWALKPRB | order << 1;
+       ctl = order << 1;
 
        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
        u32 tmp, ctl;
 
-        /* address of the mappings table */
-        addr >>= 12;
-        tmp = (u32) addr<<4;
-        tmp &= ~0xf;
-        pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
-
-        /* Enable GART translation for this hammer. */
-        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
-        ctl |= GARTEN;
-        ctl &= ~(DISGARTCPU | DISGARTIO);
-        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+       /* address of the mappings table */
+       addr >>= 12;
+       tmp = (u32) addr<<4;
+       tmp &= ~0xf;
+       pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+       /* Enable GART translation for this hammer. */
+       pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+       ctl |= GARTEN | DISTLBWALKPRB;
+       ctl &= ~(DISGARTCPU | DISGARTIO);
+       pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
 
 static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
index fd5a1f365c95105ebe095c33df1dfbf91e1715a1..3cce71413d0be86278a7037eccc692d3d205b4e8 100644 (file)
 #define MSR_IA32_MC0_ADDR              0x00000402
 #define MSR_IA32_MC0_MISC              0x00000403
 
+#define MSR_AMD64_MC0_MASK             0xc0010044
+
 #define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
 #define MSR_IA32_MCx_STATUS(x)         (MSR_IA32_MC0_STATUS + 4*(x))
 #define MSR_IA32_MCx_ADDR(x)           (MSR_IA32_MC0_ADDR + 4*(x))
 #define MSR_IA32_MCx_MISC(x)           (MSR_IA32_MC0_MISC + 4*(x))
 
+#define MSR_AMD64_MCx_MASK(x)          (MSR_AMD64_MC0_MASK + (x))
+
 /* These are consecutive and not in the normal 4er MCE bank block */
 #define MSR_IA32_MC0_CTL2              0x00000280
 #define MSR_IA32_MCx_CTL2(x)           (MSR_IA32_MC0_CTL2 + (x))
index 86d1ad4962a73a352ec22625658bec7deca21d7c..73fb469908c6852476c10a75a05aa49329f386b8 100644 (file)
@@ -499,7 +499,7 @@ out:
                 * Don't enable translation yet but enable GART IO and CPU
                 * accesses and set DISTLBWALKPRB since GART table memory is UC.
                 */
-               u32 ctl = DISTLBWALKPRB | aper_order << 1;
+               u32 ctl = aper_order << 1;
 
                bus = amd_nb_bus_dev_ranges[i].bus;
                dev_base = amd_nb_bus_dev_ranges[i].dev_base;
index 3ecece0217ef1c10e4bcab2a9cd64b9d10f94833..3532d3bf81050ef145a861b09c294980bf1674c3 100644 (file)
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
        /* As a rule processors have APIC timer running in deep C states */
        if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
                set_cpu_cap(c, X86_FEATURE_ARAT);
+
+       /*
+        * Disable GART TLB Walk Errors on Fam10h. We do this here
+        * because this is always needed when GART is enabled, even in a
+        * kernel which has no MCE support built in.
+        */
+       if (c->x86 == 0x10) {
+               /*
+                * The BIOS should disable GartTlbWlk errors itself. If it
+                * doesn't, do it here as suggested by the BKDG.
+                *
+                * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+                */
+               u64 mask;
+
+               rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+               mask |= (1 << 10);
+               wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+       }
 }
 
 #ifdef CONFIG_X86_32
index 461f62bbd774a0adec028334f8c723e5a33bea1d..cf4e369cea6793ab3b4eac82fcdcd594547cd019 100644 (file)
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
-               [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
+               [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = {
  *
  * Exceptions:
  *
+ * 0x000       FP      PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x003       FP      PERF_CTL[3]
+ * 0x004       FP      PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x00B       FP      PERF_CTL[3]
  * 0x00D       FP      PERF_CTL[3]
  * 0x023       DE      PERF_CTL[2:0]
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DF       LS      PERF_CTL[5:0]
  * 0x1D6       EX      PERF_CTL[5:0]
  * 0x1D8       EX      PERF_CTL[5:0]
+ *
+ * (*) depending on the umask all FPU counters may be used
  */
 
 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-       unsigned int event_code = amd_get_event_code(&event->hw);
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int event_code = amd_get_event_code(hwc);
 
        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
+               case 0x000:
+                       if (!(hwc->config & 0x0000F000ULL))
+                               break;
+                       if (!(hwc->config & 0x00000F00ULL))
+                               break;
+                       return &amd_f15_PMC3;
+               case 0x004:
+                       if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+                               break;
+                       return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
-               default:
-                       return &amd_f15_PMC53;
                }
+               return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
index 82ada01625b98c111a41fbecf72fdd026c08e5f1..b117efd24f71f589286aac56e0b50fb235dac0cc 100644 (file)
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
 #define AGPEXTERN
 #endif
 
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR     (1ULL << 40)
+
 /* backdoor interface to AGP driver */
 AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir, unsigned long align_mask)
 {
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
-       unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+       unsigned long iommu_page;
        int i;
 
+       if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+               return bad_dma_addr;
+
+       iommu_page = alloc_iommu(dev, npages, align_mask);
        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
index c2871d3c71b64a62c2902a8e5b48178264c4e788..8ed8908cc9f79ab06d828d181555255fa9f65c82 100644 (file)
@@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id)
                identify_secondary_cpu(c);
 }
 
+static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
+{
+       int node1 = early_cpu_to_node(cpu1);
+       int node2 = early_cpu_to_node(cpu2);
+
+       /*
+        * Our CPU scheduler assumes all logical cpus in the same physical cpu
+        * share the same node. But buggy ACPI or NUMA emulation might assign
+        * them to different nodes. Fix it.
+        */
+       if (node1 != node2) {
+               pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
+                          cpu1, node1, cpu2, node2, node2);
+
+               numa_remove_cpu(cpu1);
+               numa_set_node(cpu1, node2);
+               numa_add_cpu(cpu1);
+       }
+}
+
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
        cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
        cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
        cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
+       check_cpu_siblings_on_same_node(cpu1, cpu2);
 }
 
 
@@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
+                       check_cpu_siblings_on_same_node(cpu, i);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
+                       check_cpu_siblings_on_same_node(cpu, i);
                        /*
                         *  Does this new cpu bringup a new core?
                         */
index dc701ea585461a3735085447b8e13af6f77b178b..2d6d226f2b10fc511897bd484d25f15c6eef22c1 100644 (file)
@@ -74,6 +74,7 @@
                                compatible = "intel,ce4100-pci", "pci";
                                device_type = "pci";
                                bus-range = <1 1>;
+                               reg = <0x0800 0x0 0x0 0x0 0x0>;
                                ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
 
                                interrupt-parent = <&ioapic2>;
                                #address-cells = <2>;
                                #size-cells = <1>;
                                compatible = "isa";
+                               reg = <0xf800 0x0 0x0 0x0 0x0>;
                                ranges = <1 0 0 0 0 0x100>;
 
                                rtc@70 {
index 5c0207bf959bc4a8490347d775eba452c13f2684..275dbc19e2cf55f2e7cb55f1c3ad861cb976c8a6 100644 (file)
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
                        pentry->freq_hz, pentry->irq);
                        if (!pentry->irq)
                                continue;
-                       mp_irq.type = MP_IOAPIC;
+                       mp_irq.type = MP_INTSRC;
                        mp_irq.irqtype = mp_INT;
 /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
                        mp_irq.irqflag = 5;
-                       mp_irq.srcbus = 0;
+                       mp_irq.srcbus = MP_BUS_ISA;
                        mp_irq.srcbusirq = pentry->irq; /* IRQ */
                        mp_irq.dstapic = MP_APIC_ALL;
                        mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
        for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
                pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
                        totallen, (u32)pentry->phys_addr, pentry->irq);
-               mp_irq.type = MP_IOAPIC;
+               mp_irq.type = MP_INTSRC;
                mp_irq.irqtype = mp_INT;
                mp_irq.irqflag = 0xf;   /* level trigger and active low */
-               mp_irq.srcbus = 0;
+               mp_irq.srcbus = MP_BUS_ISA;
                mp_irq.srcbusirq = pentry->irq; /* IRQ */
                mp_irq.dstapic = MP_APIC_ALL;
                mp_irq.dstirq = pentry->irq;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
        /* Avoid searching for BIOS MP tables */
        x86_init.mpparse.find_smp_config = x86_init_noop;
        x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
+       set_bit(MP_BUS_ISA, mp_bus_not_pci);
 }
 
 /*
index 1c7121ba18ffdc902740c396427e448ae62250d4..5cc821cb2e0968090501e8c6e5af36353d5e2f87 100644 (file)
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
 config XEN_SAVE_RESTORE
        bool
        depends on XEN
+       select HIBERNATE_CALLBACKS
        default y
 
 config XEN_DEBUG_FS
index 49dbd78ec3cb0d4481e72e96cdb041507b5ae686..e3c6a06cf725ef08b87a5961b33043bbf493acf3 100644 (file)
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 static __init void xen_init_cpuid_mask(void)
 {
        unsigned int ax, bx, cx, dx;
+       unsigned int xsave_mask;
 
        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
-
        ax = 1;
-       cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
 
-       /* cpuid claims we support xsave; try enabling it to see what happens */
-       if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
-               unsigned long cr4;
-
-               set_in_cr4(X86_CR4_OSXSAVE);
-               
-               cr4 = read_cr4();
+       xsave_mask =
+               (1 << (X86_FEATURE_XSAVE % 32)) |
+               (1 << (X86_FEATURE_OSXSAVE % 32));
 
-               if ((cr4 & X86_CR4_OSXSAVE) == 0)
-                       cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
-
-               clear_in_cr4(X86_CR4_OSXSAVE);
-       }
+       /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+       if ((cx & xsave_mask) != xsave_mask)
+               cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
index c82df6c9c0f0a4a1bbc61f1ea881ebdd37fecdef..a991b57f91fe26ae944bdd11a5f03db9307bbf5b 100644 (file)
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
        if (io_page &&
            (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
                other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-               WARN(addr != other_addr,
+               WARN_ONCE(addr != other_addr,
                        "0x%lx is using VM_IO, but it is 0x%lx!\n",
                        (unsigned long)addr, (unsigned long)other_addr);
        } else {
                pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
                other_addr = (_pte.pte & PTE_PFN_MASK);
-               WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+               WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
                        "0x%lx is missing VM_IO (and wasn't fixed)!\n",
                        (unsigned long)addr);
        }
index 90f22cc30799cf8bf71e25c4cf68718c3fbf0cf3..5fa3dd2705c61f88fd2315974d8081881f0f860e 100644 (file)
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
-       /*
-        * If the current process is plugged and has barriers submitted,
-        * we will livelock if we don't unplug first.
-        */
-       blk_flush_plug(current);
-}
-
 static void blk_delay_work(struct work_struct *work)
 {
        struct request_queue *q;
 
        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
 }
 
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+       queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                               msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
        cancel_delayed_work_sync(&q->delay_work);
-       queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -310,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
@@ -321,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else
@@ -329,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on our
+ *    behalf.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+       if (likely(!blk_queue_stopped(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -342,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                blk_queue_end_tag(q, rq);
 
        add_acct_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1311,15 @@ get_rq:
 
        plug = current->plug;
        if (plug) {
-               if (!plug->should_sort && !list_empty(&plug->list)) {
+               /*
+                * If this is the first request added after a plug, fire
+                * off a plug trace. If others have been added before, check
+                * if we have multiple devices in this plug. If so, make a
+                * note to sort the list before dispatch.
+                */
+               if (list_empty(&plug->list))
+                       trace_block_plug(q);
+               else if (!plug->should_sort) {
                        struct request *__rq;
 
                        __rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1335,7 @@ get_rq:
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
 out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
@@ -2644,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
        plug->magic = PLUG_MAGIC;
        INIT_LIST_HEAD(&plug->list);
+       INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
 
        /*
@@ -2668,33 +2677,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
-static void flush_plug_list(struct blk_plug *plug)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental large
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+                           bool from_schedule)
+       __releases(q->queue_lock)
+{
+       trace_block_unplug(q, depth, !from_schedule);
+
+       /*
+        * If we are punting this to kblockd, then we can safely drop
+        * the queue_lock before waking kblockd (which needs to take
+        * this lock).
+        */
+       if (from_schedule) {
+               spin_unlock(q->queue_lock);
+               blk_run_queue_async(q);
+       } else {
+               __blk_run_queue(q);
+               spin_unlock(q->queue_lock);
+       }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+       LIST_HEAD(callbacks);
+
+       if (list_empty(&plug->cb_list))
+               return;
+
+       list_splice_init(&plug->cb_list, &callbacks);
+
+       while (!list_empty(&callbacks)) {
+               struct blk_plug_cb *cb = list_first_entry(&callbacks,
+                                                         struct blk_plug_cb,
+                                                         list);
+               list_del(&cb->list);
+               cb->callback(cb);
+       }
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
        struct request_queue *q;
        unsigned long flags;
        struct request *rq;
+       LIST_HEAD(list);
+       unsigned int depth;
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
+       flush_plug_callbacks(plug);
        if (list_empty(&plug->list))
                return;
 
-       if (plug->should_sort)
-               list_sort(NULL, &plug->list, plug_rq_cmp);
+       list_splice_init(&plug->list, &list);
+
+       if (plug->should_sort) {
+               list_sort(NULL, &list, plug_rq_cmp);
+               plug->should_sort = 0;
+       }
 
        q = NULL;
+       depth = 0;
+
+       /*
+        * Save and disable interrupts here, to avoid doing it for every
+        * queue lock we have to take.
+        */
        local_irq_save(flags);
-       while (!list_empty(&plug->list)) {
-               rq = list_entry_rq(plug->list.next);
+       while (!list_empty(&list)) {
+               rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                BUG_ON(!rq->q);
                if (rq->q != q) {
-                       if (q) {
-                               __blk_run_queue(q, false);
-                               spin_unlock(q->queue_lock);
-                       }
+                       /*
+                        * This drops the queue lock
+                        */
+                       if (q)
+                               queue_unplugged(q, depth, from_schedule);
                        q = rq->q;
+                       depth = 0;
                        spin_lock(q->queue_lock);
                }
                rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2775,28 @@ static void flush_plug_list(struct blk_plug *plug)
                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
                else
                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-       }
 
-       if (q) {
-               __blk_run_queue(q, false);
-               spin_unlock(q->queue_lock);
+               depth++;
        }
 
-       BUG_ON(!list_empty(&plug->list));
-       local_irq_restore(flags);
-}
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-       flush_plug_list(plug);
+       /*
+        * This drops the queue lock
+        */
+       if (q)
+               queue_unplugged(q, depth, from_schedule);
 
-       if (plug == tsk->plug)
-               tsk->plug = NULL;
+       local_irq_restore(flags);
 }
+EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-       if (plug)
-               __blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+       blk_flush_plug_list(plug, false);
 
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-       __blk_finish_plug(tsk, plug);
-       tsk->plug = plug;
+       if (plug == current->plug)
+               current->plug = NULL;
 }
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
 
 int __init blk_dev_init(void)
 {
index 7482b7fa863ba10b337d7547fb7040767e02b2da..81e31819a597bb0c6ed6dd4f781eb037c986a243 100644 (file)
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
        __elv_add_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        /* the queue is stopped so it won't be plugged+unplugged */
        if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                q->request_fn(q);
index eba4a2790c6c4ba2e0467ca58300050e190d6804..6c9b5e189e624888860e5185d1d9b3479b3e49a4 100644 (file)
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
         * request_fn may confuse the driver.  Always use kblockd.
         */
        if (queued)
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
index 261c75c665ae381a4c12dd444f7ed3b847c155a0..6d735122bc59499693d8682d4730c92ff63c91be 100644 (file)
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
 {
        int ret;
        struct device *dev = disk_to_dev(disk);
-
        struct request_queue *q = disk->queue;
 
        if (WARN_ON(!q))
@@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk)
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
-               blk_trace_remove_sysfs(disk_to_dev(disk));
+               blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }
index 61263463e38e17be7c7f742f0bbe9233eec2ecd2..c9df8fc3c99979de8fe21f439b82989d1361e859 100644 (file)
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
index 3be881ec95ad06fa95ce0710dddb81f0cbef1c50..46b0a1d1d925703d32d5f796ea400f43542e0b3d 100644 (file)
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue, false);
+                               __blk_run_queue(cfqd->queue);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue, false);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue, false);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
index 0cdb4e7ebab4e8eba1e7ca1514525c1120948e5b..6f6abc08bb565e61d63294e1a14f66ac6cda6a3d 100644 (file)
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT_MERGE:
index 8210405031544e900bcc97f6a7d21c43a02923fb..7025593a58c89d39cb3ff260801b242730d8fdf2 100644 (file)
@@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int amba_pm_freeze(struct device *dev)
 {
@@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze         NULL
 #define amba_pm_thaw           NULL
@@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
 #define amba_pm_poweroff_noirq NULL
 #define amba_pm_restore_noirq  NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM
 
index f051cfff18afe2e703bc6383d8b6277cdc652401..9e0e4fc24c46506e4adcfd6f4e429ad6ddced5f7 100644 (file)
@@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
 
        of_device_node_put(&pa->pdev.dev);
        kfree(pa->pdev.dev.platform_data);
+       kfree(pa->pdev.mfd_cell);
        kfree(pa->pdev.resource);
        kfree(pa);
 }
@@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int platform_pm_freeze(struct device *dev)
 {
@@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define platform_pm_freeze             NULL
 #define platform_pm_thaw               NULL
@@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
 #define platform_pm_poweroff_noirq     NULL
 #define platform_pm_restore_noirq      NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
 
index 052dc53eef388db918f2a0f6ce961be366691577..fbc5b6e7c59156556cf4d02b0970411e0844dd80 100644 (file)
@@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
                }
                break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
@@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
                        suspend_report_result(ops->restore, error);
                }
                break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }
@@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
                }
                break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
@@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }
index d77005849af857935fe270725f2ee0d08561d265..219d88a0eeae07c1e963c4997f6894fe79046d2a 100644 (file)
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
+               err = 0;
        }
 
        return err;
index 6b396759e7f596f54eab9d9bd28c06ae13fe6887..8a781540590cdf1e76e79c74d137a647c02642f6 100644 (file)
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
        {}
 };
 
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
        .driver = {
                .name = "fsl-elo-dma",
                .owner = THIS_MODULE,
index 7f6f01a4b145ad00bc9c2b1fac7c704d67e145a4..0a775f7987c257237943879a1d08e5ac20f3cd14 100644 (file)
@@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val |= (1 << nr);
        else
                reg_val &= ~(1 << nr);
+       iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
 
        mutex_unlock(&chip->lock);
 
index 583e92592073647481d52acf81cd1a520d0e9d85..7630ab7b9bec30cdad9628908d74813075413b65 100644 (file)
@@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 
        ret = gpiochip_add(&chip->gpio_chip);
        if (ret)
-               goto out_failed;
+               goto out_failed_irq;
 
        if (pdata->setup) {
                ret = pdata->setup(client, chip->gpio_chip.base,
@@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
        i2c_set_clientdata(client, chip);
        return 0;
 
-out_failed:
+out_failed_irq:
        pca953x_irq_teardown(chip);
+out_failed:
        kfree(chip->dyn_pdata);
        kfree(chip);
        return ret;
index 2c6af87051030b1017cc5670fcbfd2bfddad61fa..f970a5f3585e03b5395b74f32f6103d21a67128d 100644 (file)
@@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val |= (1 << nr);
        else
                reg_val &= ~(1 << nr);
+       iowrite32(reg_val, &chip->reg->po);
 
        mutex_unlock(&chip->lock);
 
index 38319a69bd0a9de978fc688bded38fa3dea90def..d6d58684712bc8b68cafb662316fe123e53b270d 100644 (file)
@@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
  * Sanity check for the adapter hardware - check the reaction of
  * the bus lines only if it seems to be idle.
  */
-static int test_bus(struct i2c_algo_bit_data *adap, char *name)
+static int test_bus(struct i2c_adapter *i2c_adap)
 {
-       int scl, sda;
+       struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+       const char *name = i2c_adap->name;
+       int scl, sda, ret;
+
+       if (adap->pre_xfer) {
+               ret = adap->pre_xfer(i2c_adap);
+               if (ret < 0)
+                       return -ENODEV;
+       }
 
        if (adap->getscl == NULL)
                pr_info("%s: Testing SDA only, SCL is not readable\n", name);
@@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name)
                       "while pulling SCL high!\n", name);
                goto bailout;
        }
+
+       if (adap->post_xfer)
+               adap->post_xfer(i2c_adap);
+
        pr_info("%s: Test OK\n", name);
        return 0;
 bailout:
        sdahi(adap);
        sclhi(adap);
+
+       if (adap->post_xfer)
+               adap->post_xfer(i2c_adap);
+
        return -ENODEV;
 }
 
@@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
        int ret;
 
        if (bit_test) {
-               ret = test_bus(bit_adap, adap->name);
+               ret = test_bus(adap);
                if (ret < 0)
                        return -ENODEV;
        }
index 70c30e6bce0b2efcfbdee8f90bfd620d62319bd9..9a58994ff7ea54bd46cf6b576e9e43b10e736b79 100644 (file)
@@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
 
        /* Let legacy drivers scan this bus for matching devices */
        if (driver->attach_adapter) {
-               dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+               dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
+                        driver->driver.name);
                dev_warn(&adap->dev, "Please use another way to instantiate "
                         "your i2c_client\n");
                /* We ignore the return code; if it fails, too bad */
@@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
 
        if (!driver->detach_adapter)
                return 0;
-       dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
+       dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
+                driver->driver.name);
        res = driver->detach_adapter(adapter);
        if (res)
                dev_err(&adapter->dev, "detach_adapter failed (%d) "
index 7f42d3a454d2d6aaebdc41e0e5a8c6700d6a00df..88d8e4cb419a1d0d6cc17828894dc4d3b67e0201 100644 (file)
@@ -39,13 +39,13 @@ struct evdev {
 };
 
 struct evdev_client {
-       int head;
-       int tail;
+       unsigned int head;
+       unsigned int tail;
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
-       int bufsize;
+       unsigned int bufsize;
        struct input_event buffer[];
 };
 
@@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex);
 static void evdev_pass_event(struct evdev_client *client,
                             struct input_event *event)
 {
-       /*
-        * Interrupts are disabled, just acquire the lock.
-        * Make sure we don't leave with the client buffer
-        * "empty" by having client->head == client->tail.
-        */
+       /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);
-       do {
-               client->buffer[client->head++] = *event;
-               client->head &= client->bufsize - 1;
-       } while (client->head == client->tail);
+
+       client->buffer[client->head++] = *event;
+       client->head &= client->bufsize - 1;
+
+       if (unlikely(client->head == client->tail)) {
+               /*
+                * This effectively "drops" all unconsumed events, leaving
+                * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+                */
+               client->tail = (client->head - 2) & (client->bufsize - 1);
+
+               client->buffer[client->tail].time = event->time;
+               client->buffer[client->tail].type = EV_SYN;
+               client->buffer[client->tail].code = SYN_DROPPED;
+               client->buffer[client->tail].value = 0;
+       }
+
        spin_unlock(&client->buffer_lock);
 
        if (event->type == EV_SYN)
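The evdev hunks above stop spinning on buffer overruns and instead overwrite the oldest events, leaving an EV_SYN/SYN_DROPPED marker followed by the newest event. A rough sketch of the reaction expected from an evdev reader, per the SYN_DROPPED convention: discard everything up to and including the next SYN_REPORT, then re-query device state with the EVIOCG* ioctls (the handle_event() helper and the choice of ABS_X are made up for illustration):

#include <linux/input.h>
#include <sys/ioctl.h>
#include <stdbool.h>

/* Illustrative sketch, not part of the merge. */
static bool resyncing;

static void handle_event(int fd, const struct input_event *ev)
{
	if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
		resyncing = true;		/* queue overflowed, our view is stale */
		return;
	}

	if (resyncing) {
		if (ev->type == EV_SYN && ev->code == SYN_REPORT) {
			struct input_absinfo absinfo;

			/* example: refresh one absolute axis after the drop */
			ioctl(fd, EVIOCGABS(ABS_X), &absinfo);
			resyncing = false;
		}
		return;				/* skip events from the truncated packet */
	}

	/* normal event processing goes here */
}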
index d6e8bd8a851c26d0e4718d10d30977451bb28009..ebbceedc92f4fb8a9f8bc959734be4b442586d01 100644 (file)
@@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 }
 EXPORT_SYMBOL(input_set_capability);
 
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+       int mt_slots;
+       int i;
+       unsigned int events;
+
+       if (dev->mtsize) {
+               mt_slots = dev->mtsize;
+       } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+               mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+                          dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
+               mt_slots = clamp(mt_slots, 2, 32);
+       } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+               mt_slots = 2;
+       } else {
+               mt_slots = 0;
+       }
+
+       events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+       for (i = 0; i < ABS_CNT; i++) {
+               if (test_bit(i, dev->absbit)) {
+                       if (input_is_mt_axis(i))
+                               events += mt_slots;
+                       else
+                               events++;
+               }
+       }
+
+       for (i = 0; i < REL_CNT; i++)
+               if (test_bit(i, dev->relbit))
+                       events++;
+
+       return events;
+}
+
 #define INPUT_CLEANSE_BITMASK(dev, type, bits)                         \
        do {                                                            \
                if (!test_bit(EV_##type, dev->evbit))                   \
@@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev)
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
+       if (!dev->hint_events_per_packet)
+               dev->hint_events_per_packet =
+                               input_estimate_events_per_packet(dev);
+
        /*
         * If delay and period are pre-set by the driver, then autorepeating
         * is handled by the driver itself and we don't do it in input.c.
index 09bef79d9da1c6e3aecd6efe40c0d4c4c900ba0b..a26922cf0e84d3049d972b7dc0d79b986b3645fb 100644 (file)
@@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
 static int __devinit twl4030_kp_probe(struct platform_device *pdev)
 {
        struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
-       const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+       const struct matrix_keymap_data *keymap_data;
        struct twl4030_keypad *kp;
        struct input_dev *input;
        u8 reg;
        int error;
 
-       if (!pdata || !pdata->rows || !pdata->cols ||
+       if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
            pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
                dev_err(&pdev->dev, "Invalid platform_data\n");
                return -EINVAL;
        }
 
+       keymap_data = pdata->keymap_data;
+
        kp = kzalloc(sizeof(*kp), GFP_KERNEL);
        input = input_allocate_device();
        if (!kp || !input) {
index 7077f9bf5ead88a15eb65699e71d00e49715fc84..62bae99424e6e436defda3723654816c44be2609 100644 (file)
@@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
                                   enum xenbus_state backend_state)
 {
        struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
-       int val;
+       int ret, val;
 
        switch (backend_state) {
        case XenbusStateInitialising:
@@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 
        case XenbusStateInitWait:
 InitWait:
+               ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                  "feature-abs-pointer", "%d", &val);
+               if (ret < 0)
+                       val = 0;
+               if (val) {
+                       ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+                                           "request-abs-pointer", "1");
+                       if (ret)
+                               pr_warning("xenkbd: can't request abs-pointer\n");
+               }
+
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
 
index efa06882de00cb855d992f868079338067813d8c..45f93d0f5592e2c3adcdcfdad32ce83696312647 100644 (file)
@@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
                        IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
                printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
                err = -EBUSY;
-               goto fail2;
+               goto fail1;
        }
 
        if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
                        IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
                printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
                err = -EBUSY;
-               goto fail3;
+               goto fail2;
        }
 
        serio_set_drvdata(serio, ts);
 
        err = serio_open(serio, drv);
        if (err)
-               return err;
+               goto fail3;
 
        //h3600_flite_control(1, 25);     /* default brightness */
-       input_register_device(ts->dev);
+       err = input_register_device(ts->dev);
+       if (err)
+               goto fail4;
 
        return 0;
 
-fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4: serio_close(serio);
+fail3: serio_set_drvdata(serio, NULL);
+       free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
 fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: serio_set_drvdata(serio, NULL);
-       input_free_device(input_dev);
+fail1: input_free_device(input_dev);
        kfree(ts);
        return err;
 }
index 3790816643bef85bee2dd1824bcb29818a693f86..8497f56f8e461cf70d8d1b8692e04b11e353323c 100644 (file)
@@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
        led->cdev.flags |= LED_CORE_SUSPENDRESUME;
        led->vcc = vcc;
 
+       /* correctly handle a regulator that is already enabled */
+       if (regulator_is_enabled(led->vcc))
+               led->enabled = 1;
+
        mutex_init(&led->mutex);
        INIT_WORK(&led->work, led_work);
 
index 5ef136cdba91dd4a891f00920029962a54185d63..e5d8904fc8f647162d4a7d79150491797cfda6d2 100644 (file)
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
        return md_raid5_congested(&rs->md, bits);
 }
 
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
-       struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-       md_raid5_kick_device(rs->md.private);
-}
-
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
 
        rs->callbacks.congested_fn = raid_is_congested;
-       rs->callbacks.unplug_fn = raid_unplug;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
        return 0;
index b12b3776c0c0c8ca46b998cac6df78845363f7ee..6e853c61d87eee47d66ddb7a510270f1c4cb2cbc 100644 (file)
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links to the plug_cnt counter in the mddev, where we
+ * keep a count of the number of outstanding plugs so other code can see
+ * if a plug is active.
  */
-static void plugger_work(struct work_struct *work)
-{
-       struct plug_handle *plug =
-               container_of(work, struct plug_handle, unplug_work);
-       plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
-       struct plug_handle *plug = (void *)data;
-       kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
-                 void (*unplug_fn)(struct plug_handle *))
-{
-       plug->unplug_flag = 0;
-       plug->unplug_fn = unplug_fn;
-       init_timer(&plug->unplug_timer);
-       plug->unplug_timer.function = plugger_timeout;
-       plug->unplug_timer.data = (unsigned long)plug;
-       INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+       struct blk_plug_cb cb;
+       mddev_t *mddev;
+};
 
-void plugger_set_plug(struct plug_handle *plug)
+static void plugger_unplug(struct blk_plug_cb *cb)
 {
-       if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
-               mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+       struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+       if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+               md_wakeup_thread(mdcb->mddev->thread);
+       kfree(mdcb);
 }
-EXPORT_SYMBOL_GPL(plugger_set_plug);
 
-int plugger_remove_plug(struct plug_handle *plug)
+/* Check that an unplug wakeup will come shortly.
+ * If not (returns 0), the caller should wake the md thread itself.
+ */
+int mddev_check_plugged(mddev_t *mddev)
 {
-       if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
-               del_timer(&plug->unplug_timer);
-               return 1;
-       } else
+       struct blk_plug *plug = current->plug;
+       struct md_plug_cb *mdcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+               if (mdcb->cb.callback == plugger_unplug &&
+                   mdcb->mddev == mddev) {
+                       /* Already on the list, move to top */
+                       if (mdcb != list_first_entry(&plug->cb_list,
+                                                   struct md_plug_cb,
+                                                   cb.list))
+                               list_move(&mdcb->cb.list, &plug->cb_list);
+                       return 1;
+               }
+       }
+       /* Not currently on the callback list */
+       mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+       if (!mdcb)
                return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
 
+       mdcb->mddev = mddev;
+       mdcb->cb.callback = plugger_unplug;
+       atomic_inc(&mddev->plug_cnt);
+       list_add(&mdcb->cb.list, &plug->cb_list);
+       return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
+       atomic_set(&mddev->plug_cnt, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
@@ -4723,7 +4735,6 @@ static void md_clean(mddev_t *mddev)
        mddev->bitmap_info.chunksize = 0;
        mddev->bitmap_info.daemon_sleep = 0;
        mddev->bitmap_info.max_write_behind = 0;
-       mddev->plug = NULL;
 }
 
 static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6699,6 @@ int md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-void md_unplug(mddev_t *mddev)
-{
-       if (mddev->plug)
-               mddev->plug->unplug_fn(mddev->plug);
-}
-
 #define SYNC_MARKS     10
 #define        SYNC_MARK_STEP  (3*HZ)
 void md_do_sync(mddev_t *mddev)
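The md.c comment block above describes the new mechanism: each (mddev, task) pair gets an md_plug_cb hung off the task's blk_plug, and plugger_unplug() runs when that plug is flushed. A rough sketch of the same blk_plug_cb pattern in isolation (struct blk_plug_cb, plug->cb_list and current->plug are the real interfaces used by the hunks above; the my_* names are made up for illustration):

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>

/* Illustrative sketch, not part of the merge. */
struct my_plug_cb {
	struct blk_plug_cb cb;		/* embedded so container_of() recovers us */
	void *owner;
};

static void my_unplug(struct blk_plug_cb *cb)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	/* called from blk_flush_plug_list(); kick the owner's work, then free */
	kfree(mcb);
}

/* Returns 1 if an unplug callback is now queued on current->plug. */
static int my_check_plugged(void *owner)
{
	struct blk_plug *plug = current->plug;
	struct my_plug_cb *mcb;

	if (!plug)
		return 0;

	mcb = kmalloc(sizeof(*mcb), GFP_ATOMIC);
	if (!mcb)
		return 0;

	mcb->owner = owner;
	mcb->cb.callback = my_unplug;
	list_add(&mcb->cb.list, &plug->cb_list);
	return 1;
}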
index 52b407369e13f0c82b7fa23f3ee2ca7d41483bbb..0b1fd3f1d85b5decfaaba4ab8b70595fffbaba1f 100644 (file)
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
-       void                    (*unplug_fn)(struct plug_handle *);
-       struct timer_list       unplug_timer;
-       struct work_struct      unplug_work;
-       unsigned long           unplug_flag;
-};
-#define        PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
-                 void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
-       del_timer_sync(&plug->unplug_timer);
-       cancel_work_sync(&plug->unplug_work);
-}
-
 /*
  * MD's 'extended' device
  */
@@ -199,6 +179,9 @@ struct mddev_s
        int                             delta_disks, new_level, new_layout;
        int                             new_chunk_sectors;
 
+       atomic_t                        plug_cnt;       /* If device is expecting
+                                                        * more bios soon.
+                                                        */
        struct mdk_thread_s             *thread;        /* management thread */
        struct mdk_thread_s             *sync_thread;   /* doing resync or reconstruct */
        sector_t                        curr_resync;    /* last block scheduled */
@@ -336,7 +319,6 @@ struct mddev_s
        struct list_head                all_mddevs;
 
        struct attribute_group          *to_remove;
-       struct plug_handle              *plug; /* if used by personality */
 
        struct bio_set                  *bio_set;
 
@@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev);
 extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
 
 extern void mddev_init(mddev_t *mddev);
 extern int md_run(mddev_t *mddev);
@@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                                   mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
index c2a21ae56d977d6249f9c8501f124e50c48f15dd..2b7a7ff401dc6fba50047bbc3f0fb1980055da13 100644 (file)
@@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf)
                spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-       blk_flush_plug(current);
-       md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf)
 
        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
@@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf)
        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   md_kick_device(conf->mddev));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              md_kick_device(conf->mddev); }));
+                           flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        mdk_rdev_t *blocked_rdev;
+       int plugged;
 
        /*
         * Register the new request and wait if the reconstruction
@@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
+       plugged = mddev_check_plugged(mddev);
+
        disks = conf->raid_disks;
  retry_write:
        blocked_rdev = NULL;
@@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync || !bitmap)
+       if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
        return 0;
@@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev)
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
-       
+
+       blk_start_plug(&plug);
        for (;;) {
                char b[BDEVNAME_SIZE];
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev)
                }
                cond_resched();
        }
+       blk_finish_plug(&plug);
 }
 
 
@@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev)
 
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
-       blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        if (conf->r1bio_pool)
                mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
index 2da83d5665925eea27b441e954ca145569278055..8e9462626ec5cb8d7441b46ddae9a848772153fe 100644 (file)
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
                spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-       blk_flush_plug(current);
-       md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
 
        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
 
-       /* No wait for all pending IO to complete */
+       /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   md_kick_device(conf->mddev));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              md_kick_device(conf->mddev); }));
+                           flush_pending_writes(conf));
+
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
+       int plugged;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
+       plugged = mddev_check_plugged(mddev);
+
        raid10_find_phys(conf, r10_bio);
  retry_write:
        blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync || !mddev->bitmap)
+       if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-
        return 0;
 }
 
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        for (;;) {
                char b[BDEVNAME_SIZE];
 
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
                }
                cond_resched();
        }
+       blk_finish_plug(&plug);
 }
 
 
index e867ee42b15239707c0dfede4be71d2bc9a72e20..f301e6ae220cfa964941d2a13546f3cfde7b62a5 100644 (file)
  *
  * We group bitmap updates into batches.  Each batch has a number.
  * We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
  *    new additions.
  * When we discover that we will need to write to any block in a stripe
  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
  * When we are ready to do a write, if that batch hasn't been written yet,
  *   we plug the array and queue the stripe for later.
  * When an unplug happens, we increment bm_flush, thus closing the current
@@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state)) {
+                       if (test_bit(STRIPE_DELAYED, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
-                               plugger_set_plug(&conf->plug);
-                       } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-                                  sh->bm_seq - conf->seq_write > 0) {
+                       else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+                                  sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
-                               plugger_set_plug(&conf->plug);
-                       } else {
+                       else {
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
                                                     < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
-                                                   md_raid5_kick_device(conf));
+                                                   );
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, previous);
@@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
                wait_event_lock_irq(conf->wait_for_stripe,
                                    !list_empty(&conf->inactive_list),
                                    conf->device_lock,
-                                   blk_flush_plug(current));
+                                   );
                osh = get_free_stripe(conf);
                spin_unlock_irq(&conf->device_lock);
                atomic_set(&nsh->count, 1);
@@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
                                atomic_inc(&conf->preread_active_stripes);
                        list_add_tail(&sh->lru, &conf->hold_list);
                }
-       } else
-               plugger_set_plug(&conf->plug);
+       }
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
@@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf)
        }
 }
 
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
-       blk_flush_plug(current);
-       raid5_activate_delayed(conf);
-       md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
-       raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
-       md_raid5_kick_device(conf);
-}
-
 int md_raid5_congested(mddev_t *mddev, int bits)
 {
        raid5_conf_t *conf = mddev->private;
@@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
+       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
+       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int disks, data_disks;
@@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                                 * add failed due to overlap.  Flush everything
                                 * and wait a while
                                 */
-                               md_raid5_kick_device(conf);
+                               md_wakeup_thread(mddev->thread);
                                release_stripe(sh);
                                schedule();
                                goto retry;
@@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                }
                        
        }
+       if (!plugged)
+               md_wakeup_thread(mddev->thread);
+
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
        spin_unlock_irq(&conf->device_lock);
@@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev)
        struct stripe_head *sh;
        raid5_conf_t *conf = mddev->private;
        int handled;
+       struct blk_plug plug;
 
        pr_debug("+++ raid5d active\n");
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        handled = 0;
        spin_lock_irq(&conf->device_lock);
        while (1) {
                struct bio *bio;
 
-               if (conf->seq_flush != conf->seq_write) {
-                       int seq = conf->seq_flush;
+               if (atomic_read(&mddev->plug_cnt) == 0 &&
+                   !list_empty(&conf->bitmap_list)) {
+                       /* Now is a good time to flush some bitmap updates */
+                       conf->seq_flush++;
                        spin_unlock_irq(&conf->device_lock);
                        bitmap_unplug(mddev->bitmap);
                        spin_lock_irq(&conf->device_lock);
-                       conf->seq_write = seq;
+                       conf->seq_write = conf->seq_flush;
                        activate_bit_delay(conf);
                }
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       raid5_activate_delayed(conf);
 
                while ((bio = remove_bio_from_retry(conf))) {
                        int ok;
@@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev)
        spin_unlock_irq(&conf->device_lock);
 
        async_tx_issue_pending_all();
+       blk_finish_plug(&plug);
 
        pr_debug("--- raid5d inactive\n");
 }
@@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev)
                       mdname(mddev));
        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-       plugger_init(&conf->plug, raid5_unplug);
-       mddev->plug = &conf->plug;
        if (mddev->queue) {
                int chunk_size;
                /* read-ahead size must cover two whole stripes, which
@@ -5192,7 +5184,6 @@ static int stop(mddev_t *mddev)
        mddev->thread = NULL;
        if (mddev->queue)
                mddev->queue->backing_dev_info.congested_fn = NULL;
-       plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
        free_conf(conf);
        mddev->private = NULL;
        mddev->to_remove = &raid5_attrs_group;
index 8d563a4f022a778a6d9354a3c1bf514c6f5e10b6..3ca77a2613ba0a544ffa9c4aca4e12122d77cc96 100644 (file)
@@ -400,8 +400,6 @@ struct raid5_private_data {
                                            * Cleared when a sync completes.
                                            */
 
-       struct plug_handle      plug;
-
        /* per cpu variables */
        struct raid5_percpu {
                struct page     *spare_page; /* Used when checking P/Q in raid6 */
index c4742fc15529a97cf4472dde06717f9ef7d31233..c9691115f2d26787fa9d85ae98382539d78a8a98 100644 (file)
@@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        retval = remap_pfn_range(vma, vma->vm_start,
-                                PFN_DOWN(virt_to_phys(mem->vaddr)),
+                                mem->dma_handle >> PAGE_SHIFT,
                                 size, vma->vm_page_prot);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
index d01574d98870a0473b56be995cd0341786b9b767..f4c8c844b913060c6e0bb12f35db8cba983a81dc 100644 (file)
@@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
 }
 EXPORT_SYMBOL(mfd_cell_disable);
 
+static int mfd_platform_add_cell(struct platform_device *pdev,
+                                const struct mfd_cell *cell)
+{
+       if (!cell)
+               return 0;
+
+       pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+       if (!pdev->mfd_cell)
+               return -ENOMEM;
+
+       return 0;
+}
+
 static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell,
                          struct resource *mem_base,
@@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
 
        pdev->dev.parent = parent;
 
-       ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+       ret = mfd_platform_add_cell(pdev, cell);
        if (ret)
                goto fail_res;
 
@@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
 
        return 0;
 
-/*     platform_device_del(pdev); */
 fail_res:
        kfree(res);
 fail_device:
index 20e4e9395b61ca1c6d85b09511b88f535a8ec120..ecafa4ba238b3ca4991a20dc5e6bb88458aee8de 100644 (file)
@@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
 
 static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
 
-static void gru_noop(unsigned int irq)
+static void gru_noop(struct irq_data *d)
 {
 }
 
 static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
        [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
-               .mask           = gru_noop,
-               .unmask         = gru_noop,
-               .ack            = gru_noop
+               .irq_mask       = gru_noop,
+               .irq_unmask     = gru_noop,
+               .irq_ack        = gru_noop
        }
 };
 
index 237913c5c92c348f70d9d4f160188402fbb48acf..fed215c4cfa19a3676666dbef17c1e4ce8b8d8bc 100644 (file)
@@ -1452,7 +1452,7 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
        oinfo = mtd->ecclayout;
        if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
                printk(KERN_ERR "%s: Not enough free bytes in OOB, "
-                       "%d available, %lu needed.\n",
+                       "%d available, %zu needed.\n",
                        MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
                return;
        }
index 6fae04b3fc6da19a1adad5c261157b62e89fe4b2..950646aa4c4b004a174c1951f08397ab808956c0 100644 (file)
@@ -209,22 +209,8 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
        int err = -EIO;
        enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
-       if (buf >= high_memory) {
-               struct page *pg;
-
-               if (((size_t)buf & PAGE_MASK) !=
-                   ((size_t)(buf + len - 1) & PAGE_MASK)) {
-                       dev_warn(host->dev, "Buffer not fit in one page\n");
-                       goto err_buf;
-               }
-
-               pg = vmalloc_to_page(buf);
-               if (pg == 0) {
-                       dev_err(host->dev, "Failed to vmalloc_to_page\n");
-                       goto err_buf;
-               }
-               p = page_address(pg) + ((size_t)buf & ~PAGE_MASK);
-       }
+       if (buf >= high_memory)
+               goto err_buf;
 
        dma_dev = host->dma_chan->device;
 
@@ -280,7 +266,8 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
        struct nand_chip *chip = mtd->priv;
        struct atmel_nand_host *host = chip->priv;
 
-       if (use_dma && len >= mtd->oobsize)
+       if (use_dma && len > mtd->oobsize)
+               /* only use DMA for transfers bigger than oob size: better performance */
                if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
                        return;
 
@@ -295,7 +282,8 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
        struct nand_chip *chip = mtd->priv;
        struct atmel_nand_host *host = chip->priv;
 
-       if (use_dma && len >= mtd->oobsize)
+       if (use_dma && len > mtd->oobsize)
+               /* only use DMA for transfers bigger than oob size: better performance */
                if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
                        return;
 
@@ -599,7 +587,10 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
                nand_chip->options |= NAND_USE_FLASH_BBT;
        }
 
-       if (cpu_has_dma() && use_dma) {
+       if (!cpu_has_dma())
+               use_dma = 0;
+
+       if (use_dma) {
                dma_cap_mask_t mask;
 
                dma_cap_zero(mask);
@@ -611,7 +602,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
                }
        }
        if (use_dma)
-               dev_info(host->dev, "Using DMA for NAND access.\n");
+               dev_info(host->dev, "Using %s for DMA transfers.\n",
+                                       dma_chan_name(host->dma_chan));
        else
                dev_info(host->dev, "No DMA support for NAND access.\n");
 
index f803c58b941d7196f4ce53d62cf6c5a0a63d33d1..66823eded7a3b7d4ffa7525728d8c587f1628275 100644 (file)
@@ -154,7 +154,7 @@ struct be_eq_obj {
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */
-       u8  msix_vec_idx;
+       u8  eq_idx;
 
        struct napi_struct napi;
 };
@@ -291,7 +291,7 @@ struct be_adapter {
        u32 num_rx_qs;
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
-       u8 msix_vec_next_idx;
+       u8 eq_next_idx;
        struct be_drv_stats drv_stats;
 
        struct vlan_group *vlan_grp;
index 9a54c8b24ff9d09ed336f70459115d0d2a2e21db..7cb5a114c7338af0c584607c7bcd1e33dba4c0e2 100644 (file)
@@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
 
-       adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+       adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
 
 
        /* Alloc TX eth compl queue */
@@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
                if (rc)
                        goto err;
 
-               rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+               rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
 
                /* CQ */
                cq = &rxo->cq;
@@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev)
                if (!isr)
                        return IRQ_NONE;
 
-               if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+               if ((1 << adapter->tx_eq.eq_idx & isr))
                        event_handle(adapter, &adapter->tx_eq);
 
                for_all_rx_queues(adapter, rxo, i) {
-                       if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+                       if ((1 << rxo->rx_eq.eq_idx & isr))
                                event_handle(adapter, &rxo->rx_eq);
                }
        }
@@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
 static inline int be_msix_vec_get(struct be_adapter *adapter,
                                        struct be_eq_obj *eq_obj)
 {
-       return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+       return adapter->msix_entries[eq_obj->eq_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter)
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
+       adapter->eq_next_idx = 0;
 
        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
@@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev)
 static void be_shutdown(struct pci_dev *pdev)
 {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev =  adapter->netdev;
 
-       if (netif_running(netdev))
+       if (!adapter)
+               return;
+
+       if (netif_running(adapter->netdev))
                cancel_delayed_work_sync(&adapter->work);
 
-       netif_device_detach(netdev);
+       netif_device_detach(adapter->netdev);
 
        be_cmd_reset_function(adapter);
 
index 34933cb9569ffc070692d8031a8c50eb32b07111..7581518ecfa2eade4c4bb4cba60601e5f39b7be2 100644 (file)
@@ -38,6 +38,8 @@
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)                     \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)               \
+                       ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)                      \
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
-                       if (bfa_ioc_sync_complete(ioc)) {
+                       if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
        struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
        if (fwhdr.signature != drv_fwhdr->signature)
                return false;
 
-       if (fwhdr.exec != drv_fwhdr->exec)
+       if (swab32(fwhdr.param) != boot_env)
                return false;
 
        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 {
        enum bfi_ioc_state ioc_fwstate;
        bool fwvalid;
+       u32 boot_env;
 
        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
+       boot_env = BFI_BOOT_LOADER_OS;
+
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;
 
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-               false : bfa_ioc_fwver_valid(ioc);
+               false : bfa_ioc_fwver_valid(ioc, boot_env);
 
        if (!fwvalid) {
-               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
                return;
        }
 
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
        /**
         * Initialize the h/w for any other states.
         */
-       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }
 
 void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-                   u32 boot_param)
+                   u32 boot_env)
 {
        u32 *fwimg;
        u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
        /*
         * Set boot type and boot param at the end.
        */
-       writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+       writel(boot_type, ((ioc->ioc_regs.smem_page_start)
                        + (BFI_BOOT_TYPE_OFF)));
-       writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-                       + (BFI_BOOT_PARAM_OFF)));
+       writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+                       + (BFI_BOOT_LOADER_OFF)));
 }
 
 static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
        void __iomem *rb;
 
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
         * Initialize IOC state of all functions on a chip reset.
         */
        rb = ioc->pcidev.pci_bar_kva;
-       if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
        } else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
        }
 
        bfa_ioc_msgflush(ioc);
-       bfa_ioc_download_fw(ioc, boot_type, boot_param);
+       bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
        /**
         * Enable interrupts just before starting LPU
@@ -2219,13 +2224,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
 static void
 bfa_ioc_recover(struct bfa_ioc *ioc)
 {
-       u16 bdf;
-
-       bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
-                                       ioc->pcidev.device_id);
-
-       pr_crit("Firmware heartbeat failure at %d", bdf);
-       BUG_ON(1);
+       pr_crit("Heart Beat of IOC has failed\n");
+       bfa_ioc_stats(ioc, ioc_hbfails);
+       bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
 static void
diff --git a/drivers/ne