Merge branch 'staging-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 13 Nov 2010 01:14:20 +0000 (17:14 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 13 Nov 2010 01:14:20 +0000 (17:14 -0800)
* 'staging-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (38 commits)
  Revert "staging: tidspbridge: replace iommu custom for opensource implementation"
  Revert "staging: tidspbridge - move shared memory iommu maps to tiomap3430.c"
  Revert "staging: tidspbridge - rename bridge_brd_mem_map/unmap to a proper name"
  Revert "staging: tidspbridge - remove custom mmu code from tiomap3430.c"
  Revert "staging: tidspbridge - fix mmufault support"
  Revert "staging: tidspbridge - remove hw directory"
  Revert "staging: tidspbridge - move all iommu related code to a new file"
  Revert "staging: tidspbridge: remove dw_dmmu_base from cfg_hostres struct"
  Revert "staging: tidspbridge - remove reserved memory clean up"
  Revert "staging: tidspbridge - deprecate reserve/unreserve_memory funtions"
  Revert "staging: tidspbridge - remove dmm custom module"
  Revert "staging: tidspbridge - update Kconfig to select IOMMU module"
  staging: tidspbridge: hardcode SCM macros while fix is upstreamed
  Staging: keucr driver: fix uninitialized variable & proper memset length
  omap: dsp: remove shm from normal memory
  Staging: wlan-ng: Fix wrong #ifdef #endif sequence
  Staging: Update parameters for cfg80211 key management operation
  Staging: ath6kl: Fix pointer casts on 64-bit architectures
  Staging: batman-adv: suppress false warning when changing the mac address
  Staging: batman-adv: fix interface alternating and bonding regression
  ...

250 files changed:
Documentation/ABI/obsolete/proc-pid-oom_adj [new file with mode: 0644]
Documentation/block/switching-sched.txt
Documentation/filesystems/xfs-delayed-logging-design.txt
Documentation/kernel-parameters.txt
Documentation/leds-class.txt
Documentation/leds/leds-lp5521.txt [new file with mode: 0644]
Documentation/leds/leds-lp5523.txt [new file with mode: 0644]
Documentation/rbtree.txt
Documentation/sysctl/kernel.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/common/gic.c
arch/arm/include/asm/hardware/it8152.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/stacktrace.c
arch/arm/kernel/traps.c
arch/arm/kernel/unwind.c
arch/arm/mach-ep93xx/include/mach/dma.h
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/d2net_v2-setup.c
arch/arm/mach-kirkwood/lacie_v2-common.c
arch/arm/mach-kirkwood/lacie_v2-common.h
arch/arm/mach-kirkwood/mpp.c
arch/arm/mach-kirkwood/netspace_v2-setup.c
arch/arm/mach-kirkwood/netxbig_v2-setup.c
arch/arm/mach-kirkwood/ts41x-setup.c
arch/arm/mach-mmp/include/mach/cputype.h
arch/arm/mach-mv78xx0/mpp.c
arch/arm/mach-orion5x/mpp.c
arch/arm/mach-orion5x/ts78xx-setup.c
arch/arm/mach-pxa/cm-x2xx.c
arch/arm/mach-pxa/saar.c
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mm/dma-mapping.c
arch/arm/plat-orion/include/plat/pcie.h
arch/arm/plat-orion/pcie.c
arch/um/include/asm/ptrace-generic.h
arch/um/kernel/ptrace.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/mmconf-fam10h_64.c
arch/x86/kernel/pvclock.c
arch/x86/mm/tlb.c
arch/x86/pci/xen.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
block/blk-core.c
block/blk-ioc.c
block/blk-map.c
block/compat_ioctl.c
block/elevator.c
block/ioctl.c
block/scsi_ioctl.c
crypto/pcrypt.c
drivers/block/aoe/aoeblk.c
drivers/block/cciss.c
drivers/block/cciss.h
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_proc.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/char/agp/intel-gtt.c
drivers/char/amiserial.c
drivers/char/nozomi.c
drivers/char/pcmcia/synclink_cs.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_manager.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/stub/Kconfig
drivers/hwmon/ad7414.c
drivers/hwmon/adt7470.c
drivers/hwmon/gpio-fan.c
drivers/input/input.c
drivers/input/keyboard/adp5588-keys.c
drivers/input/keyboard/atkbd.c
drivers/input/misc/pcf8574_keypad.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/acecad.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/led-triggers.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lp5521.c [new file with mode: 0644]
drivers/leds/leds-lp5523.c [new file with mode: 0644]
drivers/leds/ledtrig-timer.c
drivers/macintosh/adb-iop.c
drivers/md/md.c
drivers/misc/apds9802als.c
drivers/misc/bh1770glc.c
drivers/misc/isl29020.c
drivers/net/wireless/rt2x00/Kconfig
drivers/pci/xen-pcifront.c
drivers/rapidio/rio.c
drivers/scsi/scsi_error.c
drivers/serial/8250.c
drivers/serial/8250_pci.c
drivers/serial/bfin_5xx.c
drivers/serial/kgdboc.c
drivers/tty/n_gsm.c
drivers/tty/tty_buffer.c
drivers/tty/tty_ldisc.c
drivers/tty/vt/vc_screen.c
drivers/usb/core/devio.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/goku_udc.h
drivers/usb/gadget/u_serial.c
drivers/usb/host/Kconfig
drivers/usb/host/ehci-mxc.c
drivers/usb/host/ohci-jz4740.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/musb/blackfin.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_regs.h
drivers/usb/musb/musbhsdma.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/uas.c
drivers/uwb/allocator.c
drivers/video/backlight/adp8860_bl.c
drivers/video/backlight/l4f00242t03.c
drivers/video/backlight/lms283gf05.c
drivers/video/backlight/mbp_nvidia_bl.c
drivers/video/backlight/pwm_bl.c
drivers/video/backlight/s6e63m0.c
drivers/xen/events.c
fs/bio.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/hugetlbfs/inode.c
fs/ioprio.c
fs/locks.c
fs/nfsd/nfs4state.c
fs/openpromfs/inode.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_quota.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/linux/atomic.h [new file with mode: 0644]
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/drbd.h
include/linux/highmem.h
include/linux/i2c/adp5588.h
include/linux/input.h
include/linux/iocontext.h
include/linux/kernel.h
include/linux/leds-lp5521.h [new file with mode: 0644]
include/linux/leds-lp5523.h [new file with mode: 0644]
include/linux/leds.h
include/linux/perf_event.h
include/linux/pwm_backlight.h
include/linux/radix-tree.h
include/linux/resource.h
include/linux/sunrpc/svc_xprt.h
include/linux/tty.h
include/linux/usb.h
include/linux/usb/musb.h
kernel/latencytop.c
kernel/perf_event.c
kernel/printk.c
kernel/range.c
kernel/sysctl.c
kernel/trace/blktrace.c
lib/radix-tree.c
mm/filemap.c
mm/memcontrol.c
mm/mprotect.c
mm/vmscan.c
security/Kconfig
security/apparmor/lsm.c
security/apparmor/policy.c
security/commoncap.c
tools/perf/Documentation/perf-trace.txt
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/scripts/perl/bin/failed-syscalls-record
tools/perf/scripts/perl/bin/rw-by-file-record
tools/perf/scripts/perl/bin/rw-by-pid-record
tools/perf/scripts/perl/bin/rwtop-record
tools/perf/scripts/perl/bin/wakeup-latency-record
tools/perf/scripts/perl/bin/workqueue-stats-record
tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
tools/perf/scripts/python/bin/futex-contention-record
tools/perf/scripts/python/bin/netdev-times-record
tools/perf/scripts/python/bin/sched-migration-record
tools/perf/scripts/python/bin/sctop-record
tools/perf/scripts/python/bin/syscall-counts-by-pid-record
tools/perf/scripts/python/bin/syscall-counts-record
tools/perf/util/ui/util.c

diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644 (file)
index 0000000..cf63f26
--- /dev/null
@@ -0,0 +1,22 @@
+What:  /proc/<pid>/oom_adj
+When:  August 2012
+Why:   /proc/<pid>/oom_adj allows userspace to influence the oom killer's
+       badness heuristic used to determine which task to kill when the kernel
+       is out of memory.
+
+       The badness heuristic has been rewritten since the introduction of
+       this tunable such that its meaning is deprecated.  The value was
+       implemented as a bitshift on a score generated by the badness()
+       function that did not have any precise units of measure.  With the
+       rewrite, the score is given as a proportion of available memory to the
+       task allocating pages, so a bitshift which grows the score
+       exponentially is impossible to tune with fine granularity.
+
+       A much more powerful interface, /proc/<pid>/oom_score_adj, was
+       introduced with the oom killer rewrite that allows users to increase or
+       decrease the badness() score linearly.  This interface will replace
+       /proc/<pid>/oom_adj.
+
+       A warning will be emitted to the kernel log if an application uses this
+       deprecated interface.  After it is printed once, future warnings will be
+       suppressed until the kernel is rebooted.
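The contrast the text draws between the old bitshift interface and the new
linear one can be made concrete. A minimal sketch, not taken from the kernel
(both function names are hypothetical):

/*
 * Old oom_adj semantics: applied as a bitshift, so each step doubles or
 * halves the badness score -- too coarse to tune with fine granularity.
 */
static unsigned long adjust_badness_old(unsigned long points, int oom_adj)
{
	if (oom_adj >= 0)
		points <<= oom_adj;
	else
		points >>= -oom_adj;
	return points;
}

/*
 * New oom_score_adj semantics: a linear offset on a score expressed as a
 * proportion of available memory, so single-unit tuning is meaningful.
 */
static long adjust_badness_new(long points, int oom_score_adj)
{
	return points + oom_score_adj;
}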
index d5af3f630814862c5f15194d871766510e16893c..71cfbdc0f74d1da35a9064c6b48490ff5797d660 100644 (file)
@@ -16,7 +16,7 @@ you can do so by typing:
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 
 To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
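The same switch can also be driven from a program; a minimal C sketch,
equivalent to the echo above (the device name hda is just the example this
document already uses):

#include <stdio.h>

int main(void)
{
	/* write the desired scheduler name into the sysfs attribute */
	FILE *f = fopen("/sys/block/hda/queue/scheduler", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("deadline\n", f);
	return fclose(f) != 0;
}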
index 96d0df28bed323d5596fc051b0ffb96ed8e3c8df..7445bf335dae7eeba4bd6640a82fa3987f48d2cc 100644 (file)
@@ -794,17 +794,6 @@ designed.
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-       => should be roughly 6 months after initial merge
-       => enough time to:
-               => gain confidence and fix problems reported by early
-                  adopters (a.k.a. guinea pigs)
-               => address worst performance regressions and undesired
-                  behaviours
-               => start tuning/optimising code for parallelism
-               => start tuning/optimising algorithms consuming
-                  excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
        => should be roughly 12 months after initial merge
        => enough time to shake out remaining problems before next round of
index ed45e9802aa810a71e1f53fdde2a5d7bbba6789e..92e83e53148fe8b1d4848af9a8fd7442cc5624ba 100644 (file)
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
                        arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 
        elevator=       [IOSCHED]
-                       Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+                       Format: {"cfq" | "deadline" | "noop"}
                        See Documentation/block/as-iosched.txt and
                        Documentation/block/deadline-iosched.txt for details.
 
index 8fd5ca2ae32dde4d9eb27722942a5d099f4afbc3..58b266bd1846f1d0560bdae575f6e764a95fd4b3 100644 (file)
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
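For illustration, code following the recommendation above would look roughly
like this sketch (the caller and the delay values are hypothetical; the two
API calls are the ones named in the text):

#include <linux/leds.h>

static void example_start_blink(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 500, delay_off = 500;

	/* falls back to software timers if the driver has no blink_set() */
	led_blink_set(led_cdev, &delay_on, &delay_off);
}

static void example_stop_blink(struct led_classdev *led_cdev)
{
	/* stops any software blink timer as well as turning the LED off */
	led_brightness_set(led_cdev, LED_OFF);
}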
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644 (file)
index 0000000..c4d8d15
--- /dev/null
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found in the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd   /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+               .max_current    = 130,
+        }, {
+                .chan_nr        = 1,
+                .led_current    = 0,
+               .max_current    = 130,
+        }, {
+                .chan_nr        = 2,
+                .led_current    = 0,
+               .max_current    = 130,
+        }
+};
+
+static int lp5521_setup(void)
+{
+       /* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+       /* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+       /* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+        .led_config     = lp5521_led_config,
+        .num_channels   = ARRAY_SIZE(lp5521_led_config),
+        .clock_mode     = LP5521_CLOCK_EXT,
+        .setup_resources   = lp5521_setup,
+        .release_resources = lp5521_release,
+        .enable            = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
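To hand this platform data to the driver, a board file would typically attach
it to the chip's I2C device; a sketch, assuming the chip sits at I2C address
0x32 (the address is a made-up example):

#include <linux/i2c.h>
#include <linux/leds-lp5521.h>

static struct i2c_board_info __initdata lp5521_board_info = {
	I2C_BOARD_INFO("lp5521", 0x32),	/* example slave address */
	.platform_data = &lp5521_platform_data,
};

The structure is then registered with i2c_register_board_info() from the
board's init code in the usual way.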
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644 (file)
index 0000000..fad2feb
--- /dev/null
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+in the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+Selftest always uses the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+               .max_current    = 130,
+        },
+...
+        }, {
+                .chan_nr        = 8,
+                .led_current    = 50,
+               .max_current    = 130,
+        }
+};
+
+static int lp5523_setup(void)
+{
+       /* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+       /* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+       /* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+        .led_config     = lp5523_led_config,
+        .num_channels   = ARRAY_SIZE(lp5523_led_config),
+        .clock_mode     = LP5523_CLOCK_EXT,
+        .setup_resources   = lp5523_setup,
+        .release_resources = lp5523_release,
+        .enable            = lp5523_enable,
+};
index 221f38be98f47be1e682876ef55c2984a8484e76..19f8278c38548405165d43256bcf8be021070087 100644 (file)
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
 To quote Linux Weekly News:
 
     There are a number of red-black trees in use in the kernel.
-    The anticipatory, deadline, and CFQ I/O schedulers all employ
-    rbtrees to track requests; the packet CD/DVD driver does the same.
+    The deadline and CFQ I/O schedulers employ rbtrees to
+    track requests; the packet CD/DVD driver does the same.
     The high-resolution timer code uses an rbtree to organize outstanding
     timer requests.  The ext3 filesystem tracks directory entries in a
     red-black tree.  Virtual memory areas (VMAs) are tracked with red-black
index 3894eaa23486f951ace787893740ac2850f7d6d9..209e1584c3dc25e8e1af61a3061c779a28f5d11e 100644 (file)
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug
@@ -213,6 +214,19 @@ to decide what to do with it.
 
 ==============================================================
 
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer.  When
+dmesg_restrict is set to (0) there are no restrictions.  When
+dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:
 
 These files can be used to set the NIS/YP domainname and the
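The effect of the dmesg_restrict toggle documented above can be demonstrated
from userspace with a short sketch (the buffer size is an arbitrary example):

#include <stdio.h>
#include <sys/klog.h>

int main(void)
{
	char buf[4096];
	/* 3 == SYSLOG_ACTION_READ_ALL, the request dmesg(8) issues */
	int n = klogctl(3, buf, sizeof(buf));

	if (n < 0) {
		perror("klogctl");	/* EPERM once dmesg_restrict is 1 */
		return 1;
	}
	fwrite(buf, 1, n, stdout);
	return 0;
}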
index a17b26cd2eb1948cecb26041cc4213070048fbb2..88b74a75d9322f097a8561213e6f5d4eebcfc5b7 100644 (file)
@@ -161,7 +161,7 @@ M:  Greg Kroah-Hartman <gregkh@suse.de>
 L:     linux-serial@vger.kernel.org
 W:     http://serial.sourceforge.net
 S:     Maintained
-T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:     drivers/serial/8250*
 F:     include/linux/serial_8250.h
 
@@ -5910,7 +5910,7 @@ S:        Maintained
 TTY LAYER
 M:     Greg Kroah-Hartman <gregkh@suse.de>
 S:     Maintained
-T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:     drivers/char/tty_*
 F:     drivers/serial/serial_core.c
 F:     include/linux/serial_core.h
@@ -6233,7 +6233,7 @@ USB SUBSYSTEM
 M:     Greg Kroah-Hartman <gregkh@suse.de>
 L:     linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org
-T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S:     Supported
 F:     Documentation/usb/
 F:     drivers/net/usb/
@@ -6598,14 +6598,14 @@ F:      drivers/platform/x86
 
 XEN PCI SUBSYSTEM
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:     xen-devel@lists.xensource.com
+L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:     Supported
 F:     arch/x86/pci/*xen*
 F:     drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:     xen-devel@lists.xensource.com
+L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:     Supported
 F:     arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
@@ -6613,7 +6613,7 @@ F:        drivers/xen/*swiotlb*
 XEN HYPERVISOR INTERFACE
 M:     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:     xen-devel@lists.xen.org
+L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:     virtualization@lists.osdl.org
 S:     Supported
 F:     arch/x86/xen/
index a19a5266d5fc989327d2f5e82b8068e13462a5b8..8ae3d48d504c31b55e2150b17435428cb3e47e4c 100644 (file)
@@ -6,7 +6,7 @@ config ARM
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
-       select GENERIC_ATOMIC64 if (!CPU_32v6K)
+       select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
        select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
        select HAVE_ARCH_KGDB
        select HAVE_KPROBES if (!XIP_KERNEL)
index ada6359160ebef12614e9cd725449e5353a7a85b..772f95f1aecddf49ada9ceea0caa5295d22c9613 100644 (file)
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
                writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
        /*
-        * Set priority on all interrupts.
+        * Set priority on all global interrupts.
         */
-       for (i = 0; i < max_irq; i += 4)
+       for (i = 32; i < max_irq; i += 4)
                writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
        /*
-        * Disable all interrupts.
+        * Disable all interrupts.  Leave the PPI and SGIs alone
+        * as these enables are banked registers.
         */
-       for (i = 0; i < max_irq; i += 32)
+       for (i = 32; i < max_irq; i += 32)
                writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
        /*
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
 
 void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
+       void __iomem *dist_base;
+       int i;
+
        if (gic_nr >= MAX_GIC_NR)
                BUG();
 
+       dist_base = gic_data[gic_nr].dist_base;
+       BUG_ON(!dist_base);
+
        gic_data[gic_nr].cpu_base = base;
 
+       /*
+        * Deal with the banked PPI and SGI interrupts - disable all
+        * PPI interrupts, ensure all SGI interrupts are enabled.
+        */
+       writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+       writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+       /*
+        * Set priority on PPI and SGI interrupts
+        */
+       for (i = 0; i < 32; i += 4)
+               writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
        writel(0xf0, base + GIC_CPU_PRIMASK);
        writel(1, base + GIC_CPU_CTRL);
 }
index 6700c7fc7ebd2e3cfc03e9a11accba01e58440e3..21fa272301f804b9bad3218b74147f6e450fa026 100644 (file)
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
-#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
index 54593b0c241b4ec78d7b6f23356666787eedd7b4..21e3a4ab3b8c58047304b694aa8161b9ebbf1c31 100644 (file)
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                breakpoint_handler(addr, regs);
                break;
        case ARM_ENTRY_ASYNC_WATCHPOINT:
-               WARN_ON("Asynchronous watchpoint exception taken. "
-                       "Debugging results may be unreliable");
+               WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
        case ARM_ENTRY_SYNC_WATCHPOINT:
                watchpoint_handler(addr, regs);
                break;
index 49643b1467e62d529d4edd661990bb64da1e1f73..07a50357492ac6858bc21d0b7913aefc93cbb1cf 100644 (file)
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
                                        enum armv7_counters counter)
 {
-       int ret;
+       int ret = 0;
 
        if (counter == ARMV7_CYCLE_COUNTER)
                ret = pmnc & ARMV7_FLAG_C;
index 20b7411e47fdeef9e31606e665957ea862638f5a..c2e112e1a05fbf0d4485db1236448653e25aa8cc 100644 (file)
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
 
        /* only go to a higher address on the stack */
        low = frame->sp;
-       high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+       high = ALIGN(low, THREAD_SIZE);
 
        /* check current frame pointer is within bounds */
        if (fp < (low + 12) || fp + 4 >= high)
index cda78d59aa31b2971ed897b9db78afd6bfb0f36e..446aee97436f22b82ba7d2f16a06d74b507b7042 100644 (file)
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-       char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
-       sprint_symbol(sym1, where);
-       sprint_symbol(sym2, from);
-       printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+       printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
index 2a161765f6d5fdc0ac07805c2a4a62957adf72a0..d2cb0b3c987216b4b7eea39289b739fbc7d0136d 100644 (file)
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
 
        /* only go to a higher address on the stack */
        low = frame->sp;
-       high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+       high = ALIGN(low, THREAD_SIZE);
 
        pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
                 frame->pc, frame->lr, frame->sp);
index 3a5961d3f3b1bf810c8d7d8369132fc64fcee54e..5e31b2b25da95a4511ddfc431d320c24c7c93211 100644 (file)
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
  */
 
 #ifndef __ASM_ARCH_DMA_H
 #include <linux/list.h>
 #include <linux/types.h>
 
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
 struct ep93xx_dma_buffer {
        struct list_head        list;
        u32                     bus_addr;
        u16                     size;
 };
 
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ *                     The arguments are the user data cookie and the DMA
+ *                     buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ *                     The arguments are the user data cookie, the DMA buffer
+ *                     which has completed, and a boolean flag indicating if
+ *                     the transfer had an error.
+ */
 struct ep93xx_dma_m2p_client {
        char                    *name;
        u8                      flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
                                        struct ep93xx_dma_buffer *buf,
                                        int bytes, int error);
 
-       /* Internal to the DMA code.  */
+       /* private: Internal use only */
        void                    *channel;
 };
 
+/* DMA M2P ports */
 #define EP93XX_DMA_M2P_PORT_I2S1       0x00
 #define EP93XX_DMA_M2P_PORT_I2S2       0x01
 #define EP93XX_DMA_M2P_PORT_AAC1       0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
 #define EP93XX_DMA_M2P_PORT_UART3      0x08
 #define EP93XX_DMA_M2P_PORT_IRDA       0x09
 #define EP93XX_DMA_M2P_PORT_MASK       0x0f
-#define EP93XX_DMA_M2P_TX              0x00
-#define EP93XX_DMA_M2P_RX              0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR  0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR    0x40
-#define EP93XX_DMA_M2P_ERROR_MASK      0x60
 
-int  ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX              0x00    /* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX              0x10    /* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR  0x20    /* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR    0x40    /* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK      0x60    /* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
                           struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
                                     struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
 
 #endif /* __ASM_ARCH_DMA_H */
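Putting the kerneldoc above together, a client's life cycle looks roughly
like the following sketch (the client name, port and flag choices are
hypothetical; the declarations are the ones in this header):

#include <mach/dma.h>

static void example_buffer_started(void *cookie,
				   struct ep93xx_dma_buffer *buf)
{
	/* the transfer for buf is now in flight */
}

static void example_buffer_finished(void *cookie,
				    struct ep93xx_dma_buffer *buf,
				    int bytes, int error)
{
	struct ep93xx_dma_m2p_client *m2p = cookie;

	/* error handling omitted; only _submit_recursive is legal here */
	ep93xx_dma_m2p_submit_recursive(m2p, buf);
}

static struct ep93xx_dma_m2p_client example_client = {
	.name		 = "example-i2s",
	.flags		 = EP93XX_DMA_M2P_PORT_I2S1 | EP93XX_DMA_M2P_TX |
			   EP93XX_DMA_M2P_IGNORE_ERROR,
	.buffer_started	 = example_buffer_started,
	.buffer_finished = example_buffer_finished,
};

static int example_dma_start(struct ep93xx_dma_buffer *buf)
{
	int err;

	example_client.cookie = &example_client;
	err = ep93xx_dma_m2p_client_register(&example_client);
	if (err)
		return err;
	ep93xx_dma_m2p_submit(&example_client, buf);
	return 0;
}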
index 51ff23b72d3a3e8d6982b031dd36df0c3c1cc711..3688123b5ad8e5ea233b1346f22d19e3ec149030 100644 (file)
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
 
        kirkwood_pcie_id(&dev, &rev);
 
-       if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
-                                       rev == MV88F6281_REV_A1)) ||
-           (dev == MV88F6282_DEV_ID))
-               return 200000000;
+       if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+               if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+                       return 200000000;
 
        return 166666667;
 }
index 4aa86e4a152c22bc2707463b85d1f3d98240a509..a31c9499ab36c6a0f235e39cf22e4bd7f522d14c 100644 (file)
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
        .init_machine   = d2net_v2_init,
        .map_io         = kirkwood_map_io,
        .init_irq       = kirkwood_init_irq,
-       .timer          = &lacie_v2_timer,
+       .timer          = &kirkwood_timer,
 MACHINE_END
index d3ea1b6c8a02ff8790db0bf51d61f6643028cbc0..285edab776e9f5f389be444c343d7246694058ea 100644 (file)
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
                        pr_err("Failed to power up HDD%d\n", i + 1);
        }
 }
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
-       kirkwood_tclk = 166666667;
-       orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
-       .init = lacie_v2_timer_init,
-};
index af521315b87bd621152cac1ae086edfa1db45373..fc64f578536ecc10f74517e991d948c81c331c14 100644 (file)
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
 void lacie_v2_register_i2c_devices(void);
 void lacie_v2_hdd_power_init(int hdd_num);
 
-extern struct sys_timer lacie_v2_timer;
-
 #endif
index 065187d177c6299f12543c3198c23c7cbd8f2476..27901f702feb28d5f4c5a0ff6ce9acbc0780af9d 100644 (file)
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
        }
        printk("\n");
 
-       while (*mpp_list) {
+       for ( ; *mpp_list; mpp_list++) {
                unsigned int num = MPP_NUM(*mpp_list);
                unsigned int sel = MPP_SEL(*mpp_list);
                int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
                if (sel != 0)
                        gpio_mode = 0;
                orion_gpio_set_valid(num, gpio_mode);
-
-               mpp_list++;
        }
 
        printk(KERN_DEBUG "  final MPP regs:");
index 5ea66f1f4178b0b846cfb42e6e600ddb2d3a1f87..65ee21fd2f3bd5657ede5a034924c47e4a3cd6f8 100644 (file)
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
        .init_machine   = netspace_v2_init,
        .map_io         = kirkwood_map_io,
        .init_irq       = kirkwood_init_irq,
-       .timer          = &lacie_v2_timer,
+       .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
        .init_machine   = netspace_v2_init,
        .map_io         = kirkwood_map_io,
        .init_irq       = kirkwood_init_irq,
-       .timer          = &lacie_v2_timer,
+       .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
        .init_machine   = netspace_v2_init,
        .map_io         = kirkwood_map_io,
        .init_irq       = kirkwood_init_irq,
-       .timer          = &lacie_v2_timer,
+       .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
index a1b45d501aef57393a47e0d99e67a423ae3355fc..93afd3c8bfd8a66fdb80bdf6acf0cbc1538c31ed 100644 (file)
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
        .init_machine   = netxbig_v2_init,
        .map_io         = kirkwood_map_io,
        .init_irq       = kirkwood_init_irq,
-       .timer          = &lacie_v2_timer,
+       .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
        .init_machine   = netxbig_v2_init,
        .map_io         = kirkwood_map_io,
        .init_irq       = kirkwood_init_irq,
-       .timer          = &lacie_v2_timer,
+       .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
index 8be09a0ce4ac724a8e5981dba3ab1a723f5e77f5..3587a281d993825076e9e16032ad4c8b76ec4b88 100644 (file)
 #include "mpp.h"
 #include "tsx1x-common.h"
 
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
 #define QNAP_TS41X_JUMPER_JP1  45
 
 static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
 
 static int __init ts41x_pci_init(void)
 {
-       if (machine_is_ts41x())
+       if (machine_is_ts41x()) {
+               /*
+                * Without this explicit reset, the PCIe SATA controller
+                * (Marvell 88sx7042/sata_mv) is known to stop working
+                * after a few minutes.
+                */
+               orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
                kirkwood_pcie_init(KW_PCIE0);
+       }
 
    return 0;
 }
index f43a68b213f111a200e0395638e3cc538e442b3e..8a3b56dfd35d7af96d47b367e5ccf0078d5d2f08 100644 (file)
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
 #ifdef CONFIG_CPU_MMP2
 static inline int cpu_is_mmp2(void)
 {
-       return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+       return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+}
 #else
 #define cpu_is_mmp2()  (0)
 #endif
index 354ac514eb899125121eb84127bcb93deac2a115..84db2dfc475ca9b24f5b30fad2637ee099de08f6 100644 (file)
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
        }
        printk("\n");
 
-       while (*mpp_list) {
+       for ( ; *mpp_list; mpp_list++) {
                unsigned int num = MPP_NUM(*mpp_list);
                unsigned int sel = MPP_SEL(*mpp_list);
                int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
                if (sel != 0)
                        gpio_mode = 0;
                orion_gpio_set_valid(num, gpio_mode);
-
-               mpp_list++;
        }
 
        printk(KERN_DEBUG "  final MPP regs:");
index bc4c3b9aaf83346fc054b5cb23eb19287b3e2fc0..db485d3b814484f76faefa2943c4a2a17b8f08c4 100644 (file)
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
        /* Initialize gpiolib. */
        orion_gpio_init();
 
-       while (mode->mpp >= 0) {
+       for ( ; mode->mpp >= 0; mode++) {
                u32 *reg;
                int num_type;
                int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
                        orion_gpio_set_unused(mode->mpp);
 
                orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
-               mode++;
        }
 
        writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
index 16f1bd5324bebb94b01ec5af9db5eb82d4e3509f..c1c1cd04bdde4dc77dd4408096048c3f45b47964 100644 (file)
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
 static struct resource ts78xx_ts_nand_resources = {
        .start          = TS_NAND_DATA,
        .end            = TS_NAND_DATA + 4,
-       .flags          = IORESOURCE_IO,
+       .flags          = IORESOURCE_MEM,
 };
 
 static struct platform_device ts78xx_ts_nand_device = {
index ac5598ce97241f7d29947ee434d539c5c2109fe6..d34b99febeb99c76eef86caf2b20881f45b39fc1 100644 (file)
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
 
 static void __init cmx2xx_init_irq(void)
 {
-       pxa27x_init_irq();
-
        if (cpu_is_pxa25x()) {
                pxa25x_init_irq();
                cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
index 4b521e045d754471df4cf4a66daff5d7434992b5..ffa50e633ee6856b88901228f46b6f21d527cc2f 100644 (file)
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
        },
 };
 
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static uint16_t lcd_power_on[] = {
        /* single frame */
        SMART_CMD_NOOP,
index c2e405a9e0256141d62dfad2f359bfa7bac87555..fd25ccd7272f7045b24a193f4c025a84c3bca97b 100644 (file)
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
 
 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
        twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
        v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }
 
index e4dd0646e85978b89a164c40100d0561bd1005f7..ac6a36142fcd5a28084b3c669fac9e800fd65497 100644 (file)
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
         * fragmentation of the DMA space, and also prevents allocations
         * smaller than a section from crossing a section boundary.
         */
-       bit = fls(size - 1) + 1;
+       bit = fls(size - 1);
        if (bit > SECTION_SHIFT)
                bit = SECTION_SHIFT;
        align = 1 << bit;
index 3ebfef72b4e700406f43131349eda135747a3304..cc99163e73fdbde0f4945a93c35cdd5830c25ac5 100644 (file)
 #ifndef __PLAT_PCIE_H
 #define __PLAT_PCIE_H
 
+struct pci_bus;
+
 u32 orion_pcie_dev_id(void __iomem *base);
 u32 orion_pcie_rev(void __iomem *base);
 int orion_pcie_link_up(void __iomem *base);
 int orion_pcie_x4_mode(void __iomem *base);
 int orion_pcie_get_local_bus_nr(void __iomem *base);
 void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
 void orion_pcie_setup(void __iomem *base,
                      struct mbus_dram_target_info *dram);
 int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
index 779553a1595e938ead6a4764e84ee8d87087acb9..af2d733c50b5cf9a32ec07932a1b64131d95ca46 100644 (file)
@@ -181,11 +181,6 @@ void __init orion_pcie_setup(void __iomem *base,
        u16 cmd;
        u32 mask;
 
-       /*
-        * soft reset PCIe unit
-        */
-       orion_pcie_reset(base);
-
        /*
         * Point PCIe unit MBUS decode windows to DRAM space.
         */
index 2cd899f75a3cc21f2e53e2f6b8d9b46368f496cc..b7c5bab9bd774db16b8eaaa31441a8aa45e45aea 100644 (file)
@@ -38,8 +38,8 @@ struct pt_regs {
 
 struct task_struct;
 
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
-                          long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+       unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 extern int get_fpregs(struct user_i387_struct __user *buf,
index a5e33f29bbeb7ef71e9a25697e5c8bfd3e664370..701b672c11225aaf6ffcdd9fe9485bccacbd4726 100644 (file)
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request,
                break;
 
        case PTRACE_SET_THREAD_AREA:
-               ret = ptrace_set_thread_area(child, addr, datavp);
+               ret = ptrace_set_thread_area(child, addr, vp);
                break;
 
        case PTRACE_FAULTINFO: {
index 286de34b0ed6773c6ccb2b7b052023394b93396f..f6ce0bda3b98a74906cb1c8297f4ba150430699d 100644 (file)
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline u32 native_apic_msr_read(u32 reg)
 {
-       u32 low, high;
+       u64 msr;
 
        if (reg == APIC_DFR)
                return -1;
 
-       rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
-       return low;
+       rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+       return (u32)msr;
 }
 
 static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
-       int msr, msr2;
+       u64 msr;
 
        if (!cpu_has_x2apic)
                return 0;
 
-       rdmsr(MSR_IA32_APICBASE, msr, msr2);
+       rdmsrl(MSR_IA32_APICBASE, msr);
        if (msr & X2APIC_ENABLE)
                return 1;
        return 0;
index b2f2d2e05cec4eba8195b60ae053d4f81a5992ee..6d90adf4428a03d90d183dcd1a77cb2b630fc8e2 100644 (file)
@@ -805,6 +805,78 @@ union uvh_node_present_table_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+       unsigned long   rsvd_0_23: 24;  /*    */
+       unsigned long   base    :  8;  /* RW */
+       unsigned long   rsvd_32_47: 16;  /*    */
+       unsigned long   m_alias :  5;  /* RW */
+       unsigned long   rsvd_53_62: 10;  /*    */
+       unsigned long   enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+       unsigned long   rsvd_0_23: 24;  /*    */
+       unsigned long   base    :  8;  /* RW */
+       unsigned long   rsvd_32_47: 16;  /*    */
+       unsigned long   m_alias :  5;  /* RW */
+       unsigned long   rsvd_53_62: 10;  /*    */
+       unsigned long   enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+       unsigned long   rsvd_0_23: 24;  /*    */
+       unsigned long   base    :  8;  /* RW */
+       unsigned long   rsvd_32_47: 16;  /*    */
+       unsigned long   m_alias :  5;  /* RW */
+       unsigned long   rsvd_53_62: 10;  /*    */
+       unsigned long   enable  :  1;  /* RW */
+    } s;
+};
+
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
 /* ========================================================================= */
@@ -856,6 +928,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                          UVH_RH_GAM_CONFIG_MMR                            */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_config_mmr_s {
+       unsigned long   m_skt     :  6;  /* RW */
+       unsigned long   n_skt     :  4;  /* RW */
+       unsigned long   rsvd_10_11:  2;  /*    */
+       unsigned long   mmiol_cfg :  1;  /* RW */
+       unsigned long   rsvd_13_63: 51;  /*    */
+    } s;
+};
+
 /* ========================================================================= */
 /*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u {
     } s;
 };
 
-/* ========================================================================= */
-/*                          UVH_SI_ADDR_MAP_CONFIG                           */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
-
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
-    unsigned long      v;
-    struct uvh_si_addr_map_config_s {
-       unsigned long   m_skt :  6;  /* RW */
-       unsigned long   rsvd_6_7:  2;  /*    */
-       unsigned long   n_skt :  4;  /* RW */
-       unsigned long   rsvd_12_63: 52;  /*    */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS0_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
-    unsigned long      v;
-    struct uvh_si_alias0_overlay_config_s {
-       unsigned long   rsvd_0_23: 24;  /*    */
-       unsigned long   base    :  8;  /* RW */
-       unsigned long   rsvd_32_47: 16;  /*    */
-       unsigned long   m_alias :  5;  /* RW */
-       unsigned long   rsvd_53_62: 10;  /*    */
-       unsigned long   enable  :  1;  /* RW */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS1_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
-    unsigned long      v;
-    struct uvh_si_alias1_overlay_config_s {
-       unsigned long   rsvd_0_23: 24;  /*    */
-       unsigned long   base    :  8;  /* RW */
-       unsigned long   rsvd_32_47: 16;  /*    */
-       unsigned long   m_alias :  5;  /* RW */
-       unsigned long   rsvd_53_62: 10;  /*    */
-       unsigned long   enable  :  1;  /* RW */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS2_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
-    unsigned long      v;
-    struct uvh_si_alias2_overlay_config_s {
-       unsigned long   rsvd_0_23: 24;  /*    */
-       unsigned long   base    :  8;  /* RW */
-       unsigned long   rsvd_32_47: 16;  /*    */
-       unsigned long   m_alias :  5;  /* RW */
-       unsigned long   rsvd_53_62: 10;  /*    */
-       unsigned long   enable  :  1;  /* RW */
-    } s;
-};
-
 
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
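
For context, every MMR in this header pairs the raw 64-bit register value with a bitfield view through a union; the uv_system_init() hunk further down reads m_skt/n_skt exactly this way. A minimal sketch of the idiom, using the post-rename names from this commit (the helper name is hypothetical):

    /* Hypothetical helper: read the config MMR once, then extract the
     * two socket-addressing fields through the bitfield view. */
    static void uv_read_m_n_sketch(int *m_val, int *n_val)
    {
            union uvh_rh_gam_config_mmr_u cfg;

            cfg.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
            *m_val = cfg.s.m_skt;
            *n_val = cfg.s.n_skt;
    }
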
index 850657d1b0ed573e23552913d7aae230df30fe9a..3f838d537392b4ddb6d061e82bbf1ce8a2a3b9d2 100644 (file)
@@ -52,7 +52,6 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
-#include <asm/atomic.h>
 
 unsigned int num_processors;
 
index ed4118de249ef0d3a72e08865ea5e949ee2d80fe..194539aea1757d79058e3ec17acbeaa68ae6432f 100644 (file)
@@ -379,14 +379,14 @@ struct redir_addr {
 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
 
 static __initdata struct redir_addr redir_addrs[] = {
-       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
-       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
-       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
 };
 
 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 {
-       union uvh_si_alias0_overlay_config_u alias;
+       union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
        int i;
 
@@ -660,7 +660,7 @@ void uv_nmi_init(void)
 
 void __init uv_system_init(void)
 {
-       union uvh_si_addr_map_config_u m_n_config;
+       union uvh_rh_gam_config_mmr_u  m_n_config;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
        int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +670,7 @@ void __init uv_system_init(void)
 
        map_low_mmrs();
 
-       m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+       m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
        mmr_base =
index 46d58448c3aff9039fdf0b909d069d00d80ef75e..e421b8cd6944af860c4b28a1a176a14320ac9f79 100644 (file)
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
        struct amd_nb *nb;
        int i;
 
-       nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+       nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+                         cpu_to_node(cpu));
        if (!nb)
                return NULL;
 
-       memset(nb, 0, sizeof(*nb));
        nb->nb_id = nb_id;
 
        /*
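
The hunk above folds kmalloc()+memset() into a single node-local, zeroing allocation. A minimal sketch of the same idiom (kzalloc_node() is shorthand for kmalloc_node() with __GFP_ZERO; the helper name is hypothetical):

    #include <linux/slab.h>
    #include <linux/topology.h>     /* cpu_to_node() */

    /* Allocate zeroed memory on the NUMA node that owns @cpu, so the
     * northbridge bookkeeping stays local to the CPUs that touch it. */
    static struct amd_nb *alloc_nb_local_sketch(int cpu)
    {
            return kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL,
                                cpu_to_node(cpu));
    }
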
index e1af7c055c7d0c86c7e3a1fe789e597825185892..ce0cb4721c9ac9eec8869e64c1fcd1a1ef0fd379 100644 (file)
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
                return 0;
        }
 
-       equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+       equiv_cpu_table = vmalloc(size);
        if (!equiv_cpu_table) {
                pr_err("failed to allocate equivalent CPU table\n");
                return 0;
index 71825806cd44f14cf49e8381e22b5dbd3ac0b29d..6da143c2a6b8b27f03a611bad88047879c80f3bf 100644 (file)
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
        wrmsrl(address, val);
 }
 
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
 {
         pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
         return 0;
 }
 
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
         {
                 .callback = set_check_enable_amd_mmconf,
                 .ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
        {}
 };
 
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
 {
        dmi_check_system(mmconf_dmi_table);
 }
index bab3b9e6f66d0d4919f5f035848037ce5ccf7639..008b91eefa188139f57f7b77de6c7aef6240ef08 100644 (file)
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
        valid_flags = flags;
 }
 
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-       u64 product;
-#ifdef __i386__
-       u32 tmp1, tmp2;
-#endif
-
-       if (shift < 0)
-               delta >>= -shift;
-       else
-               delta <<= shift;
-
-#ifdef __i386__
-       __asm__ (
-               "mul  %5       ; "
-               "mov  %4,%%eax ; "
-               "mov  %%edx,%4 ; "
-               "mul  %5       ; "
-               "xor  %5,%5    ; "
-               "add  %4,%%eax ; "
-               "adc  %5,%%edx ; "
-               : "=A" (product), "=r" (tmp1), "=r" (tmp2)
-               : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
-       __asm__ (
-               "mul %%rdx ; shrd $32,%%rdx,%%rax"
-               : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-       return product;
-}
-
 static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
 {
        u64 delta = native_read_tsc() - shadow->tsc_timestamp;
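
For reference, the deleted asm implements a 32.32 fixed-point scaling: shift the delta, multiply by the 32-bit fraction, and keep the middle 64 bits of the product. A portable-C sketch of the same computation, assuming a compiler with unsigned __int128 (GCC/Clang on 64-bit targets):

    static inline unsigned long long scale_delta_sketch(unsigned long long delta,
                                                        unsigned int mul_frac,
                                                        int shift)
    {
            if (shift < 0)
                    delta >>= -shift;
            else
                    delta <<= shift;

            /* 64x32 -> up to 96-bit product; >> 32 keeps the integer part. */
            return (unsigned long long)(((unsigned __int128)delta * mul_frac) >> 32);
    }
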
index 49358481c733235918cde7c576a3332fba50c364..12cdbb17ad181dfac805d60f4991ee1eacfb935c 100644 (file)
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void)
        }
 }
 
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
                unsigned long action, void *hcpu)
 {
        switch (action & 0xf) {
index 117f5b8daf7515bf8ac03deb5522a011c286280d..d7b5109f7a9c28b05e120da3eec295b1716f543e 100644 (file)
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                irq = xen_allocate_pirq(v[i], 0, /* not sharable */
                        (type == PCI_CAP_ID_MSIX) ?
                        "pcifront-msi-x" : "pcifront-msi");
-               if (irq < 0)
-                       return -1;
+               if (irq < 0) {
+                       ret = -1;
+                       goto free;
+               }
 
                ret = set_irq_msi(irq, msidesc);
                if (ret)
@@ -164,7 +166,7 @@ error:
        if (ret == -ENODEV)
                dev_err(&dev->dev, "Xen PCI frontend has not registered" \
                        " MSI/MSI-X support!\n");
-
+free:
        kfree(v);
        return ret;
 }
index 20ea20a39e2a2f4b2a0012edd532ebff368f3e5a..a318194002b56c3a953df57a9664c5f02745d2ba 100644 (file)
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
         * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
         * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
         */
-       bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
-               UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+       bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+                               * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
        BUG_ON(!bau_desc);
 
        pa = uv_gpa(bau_desc); /* need the real nasid */
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
        struct bau_payload_queue_entry *pqp_malloc;
        struct bau_control *bcp;
 
-       pqp = (struct bau_payload_queue_entry *) kmalloc_node(
-               (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
-               GFP_KERNEL, node);
+       pqp = kmalloc_node((DEST_Q_SIZE + 1)
+                          * sizeof(struct bau_payload_queue_entry),
+                          GFP_KERNEL, node);
        BUG_ON(!pqp);
        pqp_malloc = pqp;
 
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
 
        timeout_us = calculate_destination_timeout();
 
-       uvhub_descs = (struct uvhub_desc *)
-               kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+       uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
        memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
        uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
        for_each_present_cpu(cpu) {
index c237b810b03ff8871e1291f149b3ca379b2af48c..21ed8d7f75a5aa6fa85970d177979457d6b94d5c 100644 (file)
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 {
        pmd_t *kernel_pmd;
 
-       level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+       level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
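
The one-word fix above matters because sizeof(pmd_t *) measures a pointer while sizeof(pmd_t) measures a page-table entry; on x86-64 the two happen to coincide at 8 bytes, which is why the slip was easy to miss. A tiny userspace illustration of the pitfall:

    #include <stdio.h>

    typedef struct { unsigned long pmd; } pmd_t;    /* stand-in for the kernel type */

    int main(void)
    {
            printf("entry:   %zu bytes\n", sizeof(pmd_t));
            printf("pointer: %zu bytes\n", sizeof(pmd_t *));
            return 0;
    }
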
index b1dbdaa23ecc2a632003382c0c9e4e1d6819e386..769c4b01fa32e11f01e3a77a37d42f4c8ab13207 100644 (file)
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
                                                     const struct e820map *e820)
 {
        phys_addr_t max_addr = PFN_PHYS(max_pfn);
-       phys_addr_t last_end = 0;
+       phys_addr_t last_end = ISA_END_ADDRESS;
        unsigned long released = 0;
        int i;
 
+       /* Free any unused memory above the low 1Mbyte. */
        for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
                phys_addr_t end = e820->map[i].addr;
                end = min(max_addr, end);
 
-               released += xen_release_chunk(last_end, end);
-               last_end = e820->map[i].addr + e820->map[i].size;
+               if (last_end < end)
+                       released += xen_release_chunk(last_end, end);
+               last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
        }
 
        if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
+               BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
        }
 
        /*
-        * Even though this is normal, usable memory under Xen, reserve
-        * ISA memory anyway because too many things think they can poke
+        * In domU, the ISA region is normal, usable memory, but we
+        * reserve ISA memory anyway because too many things poke
         * about in there.
         *
-        * In a dom0 kernel, this region is identity mapped with the
-        * hardware ISA area, so it really is out of bounds.
+        * In Dom0, the host E820 information can leave gaps in the
+        * ISA range, which would cause us to release those pages.  To
+        * avoid this, we unconditionally reserve them here.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);
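
The reworked loop above releases only true holes between E820 entries, and never touches anything below the 1 MB ISA boundary or inside a region an earlier entry already covered. A self-contained sketch of the walk, with a hypothetical release() callback standing in for xen_release_chunk():

    struct region { unsigned long long addr, size; };

    static void release_gaps_sketch(const struct region *map, int n,
                                    unsigned long long max_addr,
                                    void (*release)(unsigned long long start,
                                                    unsigned long long end))
    {
            unsigned long long last_end = 0x100000;    /* ISA_END_ADDRESS */
            int i;

            for (i = 0; i < n && last_end < max_addr; i++) {
                    unsigned long long end = map[i].addr;

                    if (end > max_addr)
                            end = max_addr;
                    if (last_end < end)
                            release(last_end, end);    /* a genuine hole */
                    if (map[i].addr + map[i].size > last_end)
                            last_end = map[i].addr + map[i].size;
            }
            if (last_end < max_addr)
                    release(last_end, max_addr);
    }
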
index f0834e2f57275d8903f65b655645f66a9b2016ee..4ce953f1b3909f38f213adcc0a39251a34875330 100644 (file)
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int where = ELEVATOR_INSERT_SORT;
        int rw_flags;
 
-       /* REQ_HARDBARRIER is no more */
-       if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-               "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-               bio_endio(bio, -EOPNOTSUPP);
-               return 0;
-       }
-
        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
                        bdevname(bio->bi_bdev, b),
                        bio->bi_rw,
                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
-                       (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+                       (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
        set_bit(BIO_EOF, &bio->bi_flags);
 }
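
This hunk, and the matching ones in compat_ioctl.c and ioctl.c further down, replace direct bd_inode->i_size loads with i_size_read(). A minimal sketch of the idiom: on 32-bit SMP a plain load of a 64-bit loff_t can tear, while i_size_read() samples it consistently.

    #include <linux/fs.h>

    static sector_t bdev_sectors_sketch(struct block_device *bdev)
    {
            /* Consistent snapshot of the 64-bit size, converted to
             * 512-byte sectors. */
            return i_size_read(bdev->bd_inode) >> 9;
    }
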
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
                return 0;
 
        /* Test device or partition size, when known. */
-       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+       maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
                sector_t sector = bio->bi_sector;
 
index d22c4c55c40689c23b7d275fbb76af17c0b23efa..3c7a339fe3813483155f31b05ea5aa5cd3224a36 100644 (file)
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 }
 EXPORT_SYMBOL(get_io_context);
 
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-       struct io_context *src = *psrc;
-       struct io_context *dst = *pdst;
-
-       if (src) {
-               BUG_ON(atomic_long_read(&src->refcount) == 0);
-               atomic_long_inc(&src->refcount);
-               put_io_context(dst);
-               *pdst = src;
-       }
-}
-EXPORT_SYMBOL(copy_io_context);
-
 static int __init blk_ioc_init(void)
 {
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
index d4a586d8691ec5ed37311d1e3fc76aa0c310828f..5d5dbe47c2285ee7ccb5f3ca784f9cdb8cb6df3e 100644 (file)
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        unaligned = 1;
                        break;
                }
+               if (!iov[i].iov_len)
+                       return -EINVAL;
        }
 
        if (unaligned || (q->dma_pad_mask & len) || map_data)
index 119f07b74dc0c41d99c282c1299a80f5f0dfc8c0..58c6ee5b010c4fd4a22206e9c88c5cadedc1a1a2 100644 (file)
@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
                return 0;
        case BLKGETSIZE:
-               size = bdev->bd_inode->i_size;
+               size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
                        return -EFBIG;
                return compat_put_ulong(arg, size >> 9);
 
        case BLKGETSIZE64_32:
-               return compat_put_u64(arg, bdev->bd_inode->i_size);
+               return compat_put_u64(arg, i_size_read(bdev->bd_inode));
 
        case BLKTRACESETUP32:
        case BLKTRACESTART: /* compatible */
index 282e8308f7e2bbab837375daf5ad81cfae97ca5e..2569512830d3e65a8a73213879b591917bdd91c4 100644 (file)
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        q->nr_sorted--;
 
        boundary = q->end_sector;
-       stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+       stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                       int plug)
 {
-       if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+       if (rq->cmd_flags & REQ_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (rq->cmd_type == REQ_TYPE_FS ||
                    (rq->cmd_flags & REQ_DISCARD)) {
index d724ceb1d46535fee2fa3e65ca57cb09cb88997d..3d866d0037f240c8d636f8b8523df962e72ab123 100644 (file)
@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
        start >>= 9;
        len >>= 9;
 
-       if (start + len > (bdev->bd_inode->i_size >> 9))
+       if (start + len > (i_size_read(bdev->bd_inode) >> 9))
                return -EINVAL;
        if (secure)
                flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                 * We need to set the startsect first, the driver may
                 * want to override it.
                 */
+               memset(&geo, 0, sizeof(geo));
                geo.start = get_start_sect(bdev);
                ret = disk->fops->getgeo(bdev, &geo);
                if (ret)
@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                ret = blkdev_reread_part(bdev);
                break;
        case BLKGETSIZE:
-               size = bdev->bd_inode->i_size;
+               size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
                        return -EFBIG;
                return put_ulong(arg, size >> 9);
        case BLKGETSIZE64:
-               return put_u64(arg, bdev->bd_inode->i_size);
+               return put_u64(arg, i_size_read(bdev->bd_inode));
        case BLKTRACESTART:
        case BLKTRACESTOP:
        case BLKTRACESETUP:
index a8b5a10eb5b04cc15cdd8ddd26ed23fe7c55bc78..4f4230b79bb6ee3c8164e462e40d4bdea3dc1fcd 100644 (file)
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
        if (hdr->iovec_count) {
                const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
                size_t iov_data_len;
-               struct sg_iovec *iov;
+               struct sg_iovec *sg_iov;
+               struct iovec *iov;
+               int i;
 
-               iov = kmalloc(size, GFP_KERNEL);
-               if (!iov) {
+               sg_iov = kmalloc(size, GFP_KERNEL);
+               if (!sg_iov) {
                        ret = -ENOMEM;
                        goto out;
                }
 
-               if (copy_from_user(iov, hdr->dxferp, size)) {
-                       kfree(iov);
+               if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+                       kfree(sg_iov);
                        ret = -EFAULT;
                        goto out;
                }
 
+               /*
+                * Sum up the vecs, making sure they don't overflow
+                */
+               iov = (struct iovec *) sg_iov;
+               iov_data_len = 0;
+               for (i = 0; i < hdr->iovec_count; i++) {
+                       if (iov_data_len + iov[i].iov_len < iov_data_len) {
+                               kfree(sg_iov);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       iov_data_len += iov[i].iov_len;
+               }
+
                /* SG_IO howto says that the shorter of the two wins */
-               iov_data_len = iov_length((struct iovec *)iov,
-                                         hdr->iovec_count);
                if (hdr->dxfer_len < iov_data_len) {
-                       hdr->iovec_count = iov_shorten((struct iovec *)iov,
+                       hdr->iovec_count = iov_shorten(iov,
                                                       hdr->iovec_count,
                                                       hdr->dxfer_len);
                        iov_data_len = hdr->dxfer_len;
                }
 
-               ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+               ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
                                          iov_data_len, GFP_KERNEL);
-               kfree(iov);
+               kfree(sg_iov);
        } else if (hdr->dxfer_len)
                ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
                                      GFP_KERNEL);
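
The summing loop above uses the classic unsigned wraparound test: for unsigned a and b, a + b overflows exactly when the sum comes out smaller than a. As a two-line sketch:

    #include <stddef.h>

    /* Returns nonzero when a + b wraps around (unsigned overflow). */
    static int add_overflows_sketch(size_t a, size_t b)
    {
            return a + b < a;
    }
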
index de3078215fe6eadd89f31bfd3a86f426a4a18015..75586f1f86e7b27632b1250b2e4572135bf03e1c 100644 (file)
@@ -504,7 +504,6 @@ err:
 
 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 {
-       kobject_put(&pcrypt->pinst->kobj);
        free_cpumask_var(pcrypt->cb_cpumask->mask);
        kfree(pcrypt->cb_cpumask);
 
index 541e18879965d01e74081701b50f3e70c78fb1b5..528f6318ded155f03d464f611f20240dd1a90c95 100644 (file)
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
                BUG();
                bio_endio(bio, -ENXIO);
                return 0;
-       } else if (bio->bi_rw & REQ_HARDBARRIER) {
-               bio_endio(bio, -EOPNOTSUPP);
-               return 0;
        } else if (bio->bi_io_vec == NULL) {
                printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
                BUG();
index 2cc4dda462794a3b2f304b0590880237511505b6..a67d0a611a8ac1d53ee16a00963ef19ade3487c8 100644 (file)
@@ -113,6 +113,8 @@ static struct board_type products[] = {
        {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
        {0x40910E11, "Smart Array 6i", &SA5_access},
        {0x3225103C, "Smart Array P600", &SA5_access},
+       {0x3223103C, "Smart Array P800", &SA5_access},
+       {0x3234103C, "Smart Array P400", &SA5_access},
        {0x3235103C, "Smart Array P400i", &SA5_access},
        {0x3211103C, "Smart Array E200i", &SA5_access},
        {0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
        for (i = 0; i < MAX_CONFIG_WAIT; i++) {
                if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
                        break;
-               msleep(10);
+               usleep_range(10000, 20000);
        }
 }
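
The msleep(10) to usleep_range() switch above trades a jiffy-rounded sleep (msleep(10) can stretch well past 10 ms at low HZ) for an hrtimer-backed window that also lets the kernel coalesce nearby wakeups. A sketch, assuming <linux/delay.h>:

    #include <linux/delay.h>

    static void config_poll_delay_sketch(void)
    {
            /* Sleep at least 10 ms, at most 20 ms. */
            usleep_range(10000, 20000);
    }
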
 
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
        *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
                        subsystem_vendor_id;
 
-       for (i = 0; i < ARRAY_SIZE(products); i++) {
+       for (i = 0; i < ARRAY_SIZE(products); i++)
                if (*board_id == products[i].board_id)
                        return i;
-       }
        dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
                *board_id);
        return -ENODEV;
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
        return -ENODEV;
 }
 
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+       void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
 {
-       int i;
+       int i, iterations;
        u32 scratchpad;
 
-       for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
-               scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-               if (scratchpad == CCISS_FIRMWARE_READY)
-                       return 0;
+       if (wait_for_ready)
+               iterations = CCISS_BOARD_READY_ITERATIONS;
+       else
+               iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+       for (i = 0; i < iterations; i++) {
+               scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+               if (wait_for_ready) {
+                       if (scratchpad == CCISS_FIRMWARE_READY)
+                               return 0;
+               } else {
+                       if (scratchpad != CCISS_FIRMWARE_READY)
+                               return 0;
+               }
                msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
        }
-       dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+       dev_warn(&pdev->dev, "board not ready, timed out.\n");
        return -ENODEV;
 }
 
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
 static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
        h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+       /* Limit commands in memory limited kdump scenario. */
+       if (reset_devices && h->max_commands > 32)
+               h->max_commands = 32;
+
        if (h->max_commands < 16) {
                dev_warn(&h->pdev->dev, "Controller reports "
                        "max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
                err = -ENOMEM;
                goto err_out_free_res;
        }
-       err = cciss_wait_for_board_ready(h);
+       err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
        if (err)
                goto err_out_free_res;
        err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
 #define cciss_noop(p) cciss_message(p, 3, 0)
 
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base)          (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE          (1 << 15)
-
-       int pos;
-       u16 control = 0;
-
-       pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-       if (pos) {
-               pci_read_config_word(pdev, msi_control_reg(pos), &control);
-               if (control & PCI_MSI_FLAGS_ENABLE) {
-                       dev_info(&pdev->dev, "resetting MSI\n");
-                       pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
-               }
-       }
-
-       pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-       if (pos) {
-               pci_read_config_word(pdev, msi_control_reg(pos), &control);
-               if (control & PCI_MSIX_FLAGS_ENABLE) {
-                       dev_info(&pdev->dev, "resetting MSI-X\n");
-                       pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
-               }
-       }
-
-       return 0;
-}
-
 static int cciss_controller_hard_reset(struct pci_dev *pdev,
        void * __iomem vaddr, bool use_doorbell)
 {
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
  * states or using the doorbell register. */
 static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-       u16 saved_config_space[32];
        u64 cfg_offset;
        u32 cfg_base_addr;
        u64 cfg_base_addr_index;
        void __iomem *vaddr;
        unsigned long paddr;
        u32 misc_fw_support, active_transport;
-       int rc, i;
+       int rc;
        CfgTable_struct __iomem *cfgtable;
        bool use_doorbell;
        u32 board_id;
+       u16 command_register;
 
        /* For controllers as old as the p600, this is very nearly
         * the same thing as
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
         * pci_set_power_state(pci_dev, PCI_D0);
         * pci_restore_state(pci_dev);
         *
-        * but we can't use these nice canned kernel routines on
-        * kexec, because they also check the MSI/MSI-X state in PCI
-        * configuration space and do the wrong thing when it is
-        * set/cleared.  Also, the pci_save/restore_state functions
-        * violate the ordering requirements for restoring the
-        * configuration space from the CCISS document (see the
-        * comment below).  So we roll our own ....
-        *
         * For controllers newer than the P600, the pci power state
         * method of resetting doesn't work so we have another way
         * using the doorbell register.
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
                return -ENODEV;
        }
 
-       for (i = 0; i < 32; i++)
-               pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+       /* Save the PCI command register */
+       pci_read_config_word(pdev, 4, &command_register);
+       /* Turn the board off.  This is so that later pci_restore_state()
+        * won't turn the board on before the rest of config space is ready.
+        */
+       pci_disable_device(pdev);
+       pci_save_state(pdev);
 
        /* find the first memory BAR, so we can find the cfg table */
        rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
        rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
-
-       /* Restore the PCI configuration space.  The Open CISS
-        * Specification says, "Restore the PCI Configuration
-        * Registers, offsets 00h through 60h. It is important to
-        * restore the command register, 16-bits at offset 04h,
-        * last. Do not restore the configuration status register,
-        * 16-bits at offset 06h."  Note that the offset is 2*i.
-        */
-       for (i = 0; i < 32; i++) {
-               if (i == 2 || i == 3)
-                       continue;
-               pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+       pci_restore_state(pdev);
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_warn(&pdev->dev, "failed to enable device.\n");
+               goto unmap_cfgtable;
        }
-       wmb();
-       pci_write_config_word(pdev, 4, saved_config_space[2]);
+       pci_write_config_word(pdev, 4, command_register);
 
        /* Some devices (notably the HP Smart Array 5i Controller)
           need a little pause here */
        msleep(CCISS_POST_RESET_PAUSE_MSECS);
 
+       /* Wait for board to become not ready, then ready. */
+       dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+       rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+       if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+               dev_warn(&pdev->dev,
+                       "failed waiting for board to become not ready\n");
+       rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+       if (rc) {
+               dev_warn(&pdev->dev,
+                       "failed waiting for board to become ready\n");
+               goto unmap_cfgtable;
+       }
+       dev_info(&pdev->dev, "board ready.\n");
+
        /* Controller should be in simple mode at this point.  If it's not,
         * It means we're on one of those controllers which doesn't support
         * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
                return 0; /* just try to do the kdump anyhow. */
        if (rc)
                return -ENODEV;
-       if (cciss_reset_msi(pdev))
-               return -ENODEV;
 
        /* Now try to get the controller to respond to a no-op */
        for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void)
                }
        }
        kthread_stop(cciss_scan_thread);
-       remove_proc_entry("driver/cciss", NULL);
+       if (proc_cciss)
+               remove_proc_entry("driver/cciss", NULL);
        bus_unregister(&cciss_bus_type);
 }
 
index ae340ffc8f815de7cf1fdae7518890556826852d..4b8933d778f154169e191938d83c3b4115d291cc 100644 (file)
@@ -200,10 +200,14 @@ struct ctlr_info
  * the above.
  */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
        ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
                CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+       ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+               CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)
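
Worked out, the new constant gives (10 * 1000) / 100 = 100 polls of the scratchpad register at 100 ms apiece for the not-ready wait, versus 1200 polls for the existing two-minute ready wait.
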
index ac04ef97eac29a0203e38d08a942c1733e5d9285..ba95cba192be8d1d00f7b7e9b47f1cb05d460470 100644 (file)
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        init_completion(&md_io.event);
        md_io.error = 0;
 
-       if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-               rw |= REQ_HARDBARRIER;
+       if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+               rw |= REQ_FUA;
        rw |= REQ_UNPLUG | REQ_SYNC;
 
- retry:
        bio = bio_alloc(GFP_NOIO, 1);
        bio->bi_bdev = bdev->md_bdev;
        bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        wait_for_completion(&md_io.event);
        ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
 
-       /* check for unsupported barrier op.
-        * would rather check on EOPNOTSUPP, but that is not reliable.
-        * don't try again for ANY return value != 0 */
-       if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-               /* Try again with no barrier */
-               dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-               set_bit(MD_NO_BARRIER, &mdev->flags);
-               rw &= ~REQ_HARDBARRIER;
-               bio_put(bio);
-               goto retry;
-       }
  out:
        bio_put(bio);
        return ok;
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
        u32 xor_sum = 0;
 
        if (!get_ldev(mdev)) {
-               dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+               dev_err(DEV,
+                       "disk is %s, cannot start al transaction (-%d +%d)\n",
+                       drbd_disk_str(mdev->state.disk), evicted, new_enr);
                complete(&((struct update_al_work *)w)->event);
                return 1;
        }
        /* do we have to do a bitmap write, first?
         * TODO reduce maximum latency:
         * submit both bios, then wait for both,
-        * instead of doing two synchronous sector writes. */
+        * instead of doing two synchronous sector writes.
+        * For now, we must not write the transaction,
+        * if we cannot write out the bitmap of the evicted extent. */
        if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
                drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
 
-       mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+       /* The bitmap write may have failed, causing a state change. */
+       if (mdev->state.disk < D_INCONSISTENT) {
+               dev_err(DEV,
+                       "disk is %s, cannot write al transaction (-%d +%d)\n",
+                       drbd_disk_str(mdev->state.disk), evicted, new_enr);
+               complete(&((struct update_al_work *)w)->event);
+               put_ldev(mdev);
+               return 1;
+       }
+
+       mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
        buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 
        buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
        unsigned int enr;
        unsigned long add = 0;
        char ppb[10];
-       int i;
+       int i, tmp;
 
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
                enr = lc_element_by_index(mdev->act_log, i)->lc_number;
                if (enr == LC_FREE)
                        continue;
-               add += drbd_bm_ALe_set_all(mdev, enr);
+               tmp = drbd_bm_ALe_set_all(mdev, enr);
+               dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+               add += tmp;
        }
 
        lc_unlock(mdev->act_log);
index 9bdcf4393c0aa9525c9c7918355cb9b985809a73..1ea1a34e78b281d8ff8658028b38e2f423716917 100644 (file)
@@ -114,11 +114,11 @@ struct drbd_conf;
 #define D_ASSERT(exp)  if (!(exp)) \
         dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 
-#define ERR_IF(exp) if (({                             \
-       int _b = (exp) != 0;                            \
-       if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",     \
-               __func__, #exp, __FILE__, __LINE__);    \
-        _b;                                            \
+#define ERR_IF(exp) if (({                                             \
+       int _b = (exp) != 0;                                            \
+       if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n",      \
+                       __func__, #exp, __FILE__, __LINE__);            \
+       _b;                                                             \
        }))
 
 /* Defines to control fault insertion */
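
The reformatted ERR_IF above leans on a GCC statement expression: the ({ ... }) block logs the failed assertion and still yields the condition, so the macro composes with an ordinary following statement. A hypothetical call site (ERR_IF's DEV logging assumes an mdev in scope, hence the parameter):

    static int lookup_sketch(struct drbd_conf *mdev,
                             unsigned int index, unsigned int nr_elements)
    {
            /* Logs "ASSERT FAILED: ..." and bails out when the bound
             * check trips. */
            ERR_IF(index >= nr_elements) return -EINVAL;
            return 0;
    }
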
@@ -749,17 +749,12 @@ struct drbd_epoch {
 
 /* drbd_epoch flag bits */
 enum {
-       DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-       DE_BARRIER_IN_NEXT_EPOCH_DONE,
-       DE_CONTAINS_A_BARRIER,
        DE_HAVE_BARRIER_NUMBER,
-       DE_IS_FINISHING,
 };
 
 enum epoch_event {
        EV_PUT,
        EV_GOT_BARRIER_NR,
-       EV_BARRIER_DONE,
        EV_BECAME_LAST,
        EV_CLEANUP = 32, /* used as flag */
 };
@@ -801,11 +796,6 @@ enum {
        __EE_CALL_AL_COMPLETE_IO,
        __EE_MAY_SET_IN_SYNC,
 
-       /* This epoch entry closes an epoch using a barrier.
-        * On successful completion, the epoch is released,
-        * and the P_BARRIER_ACK send. */
-       __EE_IS_BARRIER,
-
        /* In case a barrier failed,
         * we need to resubmit without the barrier flag. */
        __EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
 #define        EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
                                 * Gets cleared when the state.conn
                                 * goes into C_CONNECTED state. */
        WRITE_BM_AFTER_RESYNC,  /* A kmalloc() during resync failed */
-       NO_BARRIER_SUPP,        /* underlying block device doesn't implement barriers */
        CONSIDER_RESYNC,
 
-       MD_NO_BARRIER,          /* meta data device does not support barriers,
-                                  so don't even try */
+       MD_NO_FUA,              /* Users wants us to not use FUA/FLUSH on meta data dev */
        SUSPEND_IO,             /* suspend application io */
        BITMAP_IO,              /* suspend application io;
                                   once no more io in flight, start bitmap io */
        BITMAP_IO_QUEUED,       /* Started bitmap IO */
-       GO_DISKLESS,            /* Disk failed, local_cnt reached zero, we are going diskless */
+       GO_DISKLESS,            /* Disk is being detached, on io-error or admin request. */
+       WAS_IO_ERROR,           /* Local disk failed returned IO error */
        RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
        NET_CONGESTED,          /* The data socket is congested */
 
@@ -947,7 +935,6 @@ enum write_ordering_e {
        WO_none,
        WO_drain_io,
        WO_bdev_flush,
-       WO_bio_barrier
 };
 
 struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 
 /* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
        case EP_PASS_ON:
                if (!forcedetach) {
                        if (__ratelimit(&drbd_ratelimit_state))
-                               dev_err(DEV, "Local IO failed in %s."
-                                            "Passing error on...\n", where);
+                               dev_err(DEV, "Local IO failed in %s.\n", where);
                        break;
                }
                /* NOTE fall through to detach case if forcedetach set */
        case EP_DETACH:
        case EP_CALL_HELPER:
+               set_bit(WAS_IO_ERROR, &mdev->flags);
                if (mdev->state.disk > D_FAILED) {
                        _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-                       dev_err(DEV, "Local IO failed in %s."
-                                    "Detaching...\n", where);
+                       dev_err(DEV,
+                               "Local IO failed in %s. Detaching...\n", where);
                }
                break;
        }
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
 static inline sector_t drbd_get_capacity(struct block_device *bdev)
 {
        /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
-       return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+       return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
 }
 
 /**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
        __release(local);
        D_ASSERT(i >= 0);
        if (i == 0) {
+               if (mdev->state.disk == D_DISKLESS)
+                       /* even internal references gone, safe to destroy */
+                       drbd_ldev_destroy(mdev);
                if (mdev->state.disk == D_FAILED)
+                       /* all application IO references gone. */
                        drbd_go_diskless(mdev);
                wake_up(&mdev->misc_wait);
        }
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
 {
        int io_allowed;
 
+       /* never get a reference while D_DISKLESS */
+       if (mdev->state.disk == D_DISKLESS)
+               return 0;
+
        atomic_inc(&mdev->local_cnt);
        io_allowed = (mdev->state.disk >= mins);
        if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
        int r;
 
-       if (test_bit(MD_NO_BARRIER, &mdev->flags))
+       if (test_bit(MD_NO_FUA, &mdev->flags))
                return;
 
        r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
        if (r) {
-               set_bit(MD_NO_BARRIER, &mdev->flags);
+               set_bit(MD_NO_FUA, &mdev->flags);
                dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
        }
 }
index 25c7a73c50621734cf22fda0654e93825e08e0ca..6be5401d0e88fd193b1af75c303c8140d4f6ccd3 100644 (file)
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
            ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
                ns.conn = os.conn;
 
+       /* we cannot fail (again) if we already detached */
+       if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+               ns.disk = D_DISKLESS;
+
+       /* if we are only D_ATTACHING yet,
+        * we can (and should) go directly to D_DISKLESS. */
+       if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+               ns.disk = D_DISKLESS;
+
        /* After C_DISCONNECTING only C_STANDALONE may follow */
        if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
                ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
            !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
                set_bit(DEVICE_DYING, &mdev->flags);
 
-       mdev->state.i = ns.i;
+       /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+        * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+        * drbd_ldev_destroy() won't happen before our corresponding
+        * after_state_ch works run, where we put_ldev again. */
+       if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+           (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+               atomic_inc(&mdev->local_cnt);
+
+       mdev->state = ns;
        wake_up(&mdev->misc_wait);
        wake_up(&mdev->state_wait);
 
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                                drbd_uuid_new_current(mdev);
                                clear_bit(NEW_CUR_UUID, &mdev->flags);
-                               drbd_md_sync(mdev);
                        }
                        spin_lock_irq(&mdev->req_lock);
                        _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
            os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
                drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
 
-       /* first half of local IO error */
-       if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-               enum drbd_io_error_p eh = EP_PASS_ON;
+       /* first half of local IO error, failure to attach,
+        * or administrative detach */
+       if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+               enum drbd_io_error_p eh;
+               int was_io_error;
+               /* corresponding get_ldev was in __drbd_set_state, to serialize
+                * our cleanup here with the transition to D_DISKLESS,
+                * so it is safe to dereference ldev here. */
+               eh = mdev->ldev->dc.on_io_error;
+               was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+               /* current state still has to be D_FAILED,
+                * there is only one way out: to D_DISKLESS,
+                * and that may only happen after our put_ldev below. */
+               if (mdev->state.disk != D_FAILED)
+                       dev_err(DEV,
+                               "ASSERT FAILED: disk is %s during detach\n",
+                               drbd_disk_str(mdev->state.disk));
 
                if (drbd_send_state(mdev))
-                       dev_warn(DEV, "Notified peer that my disk is broken.\n");
+                       dev_warn(DEV, "Notified peer that I am detaching my disk\n");
                else
-                       dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+                       dev_err(DEV, "Sending state for detaching disk failed\n");
 
                drbd_rs_cancel_all(mdev);
 
-               if (get_ldev_if_state(mdev, D_FAILED)) {
-                       eh = mdev->ldev->dc.on_io_error;
-                       put_ldev(mdev);
-               }
-               if (eh == EP_CALL_HELPER)
+               /* In case we want to get something to stable storage still,
+                * this may be the last chance.
+                * Following put_ldev may transition to D_DISKLESS. */
+               drbd_md_sync(mdev);
+               put_ldev(mdev);
+
+               if (was_io_error && eh == EP_CALL_HELPER)
                        drbd_khelper(mdev, "local-io-error");
        }
 
+        /* second half of local IO error, failure to attach,
+         * or administrative detach,
+         * after local_cnt references have reached zero again */
+        if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+                /* We must still be diskless,
+                 * re-attach has to be serialized with this! */
+                if (mdev->state.disk != D_DISKLESS)
+                        dev_err(DEV,
+                                "ASSERT FAILED: disk is %s while going diskless\n",
+                                drbd_disk_str(mdev->state.disk));
 
-       /* second half of local IO error handling,
-        * after local_cnt references have reached zero: */
-       if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-               mdev->rs_total = 0;
-               mdev->rs_failed = 0;
-               atomic_set(&mdev->rs_pending_cnt, 0);
-       }
-
-       if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
-               /* We must still be diskless,
-                * re-attach has to be serialized with this! */
-               if (mdev->state.disk != D_DISKLESS)
-                       dev_err(DEV,
-                               "ASSERT FAILED: disk is %s while going diskless\n",
-                               drbd_disk_str(mdev->state.disk));
+                mdev->rs_total = 0;
+                mdev->rs_failed = 0;
+                atomic_set(&mdev->rs_pending_cnt, 0);
 
-               /* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-                * will inc/dec it frequently. Since we became D_DISKLESS, no
-                * one has touched the protected members anymore, though, so we
-                * are safe to free them here. */
                if (drbd_send_state(mdev))
-                       dev_warn(DEV, "Notified peer that I detached my disk.\n");
+                       dev_warn(DEV, "Notified peer that I'm now diskless.\n");
                else
-                       dev_err(DEV, "Sending state for detach failed\n");
-
-               lc_destroy(mdev->resync);
-               mdev->resync = NULL;
-               lc_destroy(mdev->act_log);
-               mdev->act_log = NULL;
-               __no_warn(local,
-                       drbd_free_bc(mdev->ldev);
-                       mdev->ldev = NULL;);
-
-               if (mdev->md_io_tmpp) {
-                       __free_page(mdev->md_io_tmpp);
-                       mdev->md_io_tmpp = NULL;
-               }
+                       dev_err(DEV, "Sending state for being diskless failed\n");
+               /* corresponding get_ldev in __drbd_set_state
+                * this may finally trigger drbd_ldev_destroy. */
+               put_ldev(mdev);
        }
 
        /* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
        drbd_set_defaults(mdev);
 
-       /* for now, we do NOT yet support it,
-        * even though we start some framework
-        * to eventually support barriers */
-       set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
        atomic_set(&mdev->ap_bio_cnt, 0);
        atomic_set(&mdev->ap_pending_cnt, 0);
        atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
        drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
        mdev->agreed_pro_version = PRO_VERSION_MAX;
-       mdev->write_ordering = WO_bio_barrier;
+       mdev->write_ordering = WO_bdev_flush;
        mdev->resync_wenr = LC_FREE;
 }
 
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
        D_ASSERT(list_empty(&mdev->resync_work.list));
        D_ASSERT(list_empty(&mdev->unplug_work.list));
        D_ASSERT(list_empty(&mdev->go_diskless.list));
-
 }
 
 
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 
        get_random_bytes(&val, sizeof(u64));
        _drbd_uuid_set(mdev, UI_CURRENT, val);
+       /* get it to stable storage _now_ */
+       drbd_md_sync(mdev);
 }
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
        return 1;
 }
 
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+       lc_destroy(mdev->resync);
+       mdev->resync = NULL;
+       lc_destroy(mdev->act_log);
+       mdev->act_log = NULL;
+       __no_warn(local,
+               drbd_free_bc(mdev->ldev);
+               mdev->ldev = NULL;);
+
+       if (mdev->md_io_tmpp) {
+               __free_page(mdev->md_io_tmpp);
+               mdev->md_io_tmpp = NULL;
+       }
+       clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
        D_ASSERT(mdev->state.disk == D_FAILED);
        /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
         * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-        * the protected members anymore, though, so in the after_state_ch work
-        * it will be safe to free them. */
+        * the protected members anymore, though, so once put_ldev reaches zero
+        * again, it will be safe to free them. */
        drbd_force_state(mdev, NS(disk, D_DISKLESS));
-       /* We need to wait for return of references checked out while we still
-        * have been D_FAILED, though (drbd_md_sync, bitmap io). */
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
-       clear_bit(GO_DISKLESS, &mdev->flags);
        return 1;
 }
 
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
        D_ASSERT(mdev->state.disk == D_FAILED);
        if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
                drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-               /* don't drbd_queue_work_front,
-                * we need to serialize with the after_state_ch work
-                * of the -> D_FAILED transition. */
 }
 
 /**
index 87925e97e613ac34de1bc6edad9d54b8de5c47bf..29e5c70e4e26c7f6e35b8bb1bec0b7f65442f3a0 100644 (file)
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                retcode = ERR_DISK_CONFIGURED;
                goto fail;
        }
+       /* It may just now have detached because of IO error.  Make sure
+        * drbd_ldev_destroy is done already, we may end up here very fast,
+        * e.g. if someone calls attach from the on-io-error handler,
+        * to realize a "hot spare" feature (not that I'd recommend that) */
+       wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
        /* allocation not in the IO path, cqueue thread context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
        if (nbc->dc.no_md_flush)
-               set_bit(MD_NO_BARRIER, &mdev->flags);
+               set_bit(MD_NO_FUA, &mdev->flags);
        else
-               clear_bit(MD_NO_BARRIER, &mdev->flags);
+               clear_bit(MD_NO_FUA, &mdev->flags);
 
        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        nbc = NULL;
        resync_lru = NULL;
 
-       mdev->write_ordering = WO_bio_barrier;
-       drbd_bump_write_ordering(mdev, WO_bio_barrier);
+       mdev->write_ordering = WO_bdev_flush;
+       drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
  force_diskless_dec:
        put_ldev(mdev);
  force_diskless:
-       drbd_force_state(mdev, NS(disk, D_DISKLESS));
+       drbd_force_state(mdev, NS(disk, D_FAILED));
        drbd_md_sync(mdev);
  release_bdev2_fail:
        if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        return 0;
 }
 
+/* Detaching the disk is a process in multiple stages.  First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then have we finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                          struct drbd_nl_cfg_reply *reply)
 {
+       drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
        reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+       if (mdev->state.disk == D_DISKLESS)
+               wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+       drbd_resume_io(mdev);
        return 0;
 }
 
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                drbd_uuid_new_current(mdev);
                clear_bit(NEW_CUR_UUID, &mdev->flags);
-               drbd_md_sync(mdev);
        }
        drbd_suspend_io(mdev);
        reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
index ad325c5d0ce19c48612b68cc22f096bf602319c0..7e6ac307e2dec3ba1c9de2f825d0b1c9cbbbf9af 100644 (file)
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                [WO_none] = 'n',
                [WO_drain_io] = 'd',
                [WO_bdev_flush] = 'f',
-               [WO_bio_barrier] = 'b',
        };
 
        seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
index efd6169acf2f04bf758c68ceee419549351c7e64..d299fe9e78c8acc80b3a3f2fb370675bf7b299be 100644 (file)
 
 #include "drbd_vli.h"
 
-struct flush_work {
-       struct drbd_work w;
-       struct drbd_epoch *epoch;
-};
-
 enum finish_epoch {
        FE_STILL_LIVE,
        FE_DESTROYED,
@@ -66,16 +61,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
 
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
-       struct drbd_epoch *prev;
-       spin_lock(&mdev->epoch_lock);
-       prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
-       if (prev == epoch || prev == mdev->current_epoch)
-               prev = NULL;
-       spin_unlock(&mdev->epoch_lock);
-       return prev;
-}
 
 #define GFP_TRY        (__GFP_HIGHMEM | __GFP_NOWARN)
 
@@ -981,7 +966,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
        return TRUE;
 }
 
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
 {
        int rv;
 
@@ -997,24 +982,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
                }
                put_ldev(mdev);
        }
-
-       return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-       struct flush_work *fw = (struct flush_work *)w;
-       struct drbd_epoch *epoch = fw->epoch;
-
-       kfree(w);
-
-       if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
-               drbd_flush_after_epoch(mdev, epoch);
-
-       drbd_may_finish_epoch(mdev, epoch, EV_PUT |
-                             (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
-       return 1;
 }
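
The hunk above collapses drbd_flush_after_epoch() and its deferred w_flush work item into a plain synchronous drbd_flush(). Only fragments of the new body are visible in this hunk; a sketch of what it plausibly reduces to, assuming the post-barrier block-layer helper of this cycle:

	static void drbd_flush_sketch(struct drbd_conf *mdev)
	{
		if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
			/* Assumption: the stock helper; flushes the volatile
			 * write cache of the backing device and waits. */
			if (blkdev_issue_flush(mdev->ldev->backing_bdev,
					       GFP_KERNEL, NULL))
				/* flush failed: degrade to draining in-flight IO */
				drbd_bump_write_ordering(mdev, WO_drain_io);
			put_ldev(mdev);
		}
	}
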
 
 /**
@@ -1027,15 +994,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)
 {
-       int finish, epoch_size;
+       int epoch_size;
        struct drbd_epoch *next_epoch;
-       int schedule_flush = 0;
        enum finish_epoch rv = FE_STILL_LIVE;
 
        spin_lock(&mdev->epoch_lock);
        do {
                next_epoch = NULL;
-               finish = 0;
 
                epoch_size = atomic_read(&epoch->epoch_size);
 
@@ -1045,16 +1010,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                        break;
                case EV_GOT_BARRIER_NR:
                        set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
-                       /* Special case: If we just switched from WO_bio_barrier to
-                          WO_bdev_flush we should not finish the current epoch */
-                       if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
-                           mdev->write_ordering != WO_bio_barrier &&
-                           epoch == mdev->current_epoch)
-                               clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
-                       break;
-               case EV_BARRIER_DONE:
-                       set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
                        break;
                case EV_BECAME_LAST:
                        /* nothing to do */
@@ -1063,23 +1018,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
-                   test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
-                   epoch->list.prev == &mdev->current_epoch->list &&
-                   !test_bit(DE_IS_FINISHING, &epoch->flags)) {
-                       /* Nearly all conditions are met to finish that epoch... */
-                       if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
-                           mdev->write_ordering == WO_none ||
-                           (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
-                           ev & EV_CLEANUP) {
-                               finish = 1;
-                               set_bit(DE_IS_FINISHING, &epoch->flags);
-                       } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
-                                mdev->write_ordering == WO_bio_barrier) {
-                               atomic_inc(&epoch->active);
-                               schedule_flush = 1;
-                       }
-               }
-               if (finish) {
+                   test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
                        if (!(ev & EV_CLEANUP)) {
                                spin_unlock(&mdev->epoch_lock);
                                drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
+                               wake_up(&mdev->ee_wait);
                        }
                }
 
@@ -1113,22 +1053,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
        spin_unlock(&mdev->epoch_lock);
 
-       if (schedule_flush) {
-               struct flush_work *fw;
-               fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
-               if (fw) {
-                       fw->w.cb = w_flush;
-                       fw->epoch = epoch;
-                       drbd_queue_work(&mdev->data.work, &fw->w);
-               } else {
-                       dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
-                       set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-                       /* That is not a recursion, only one level */
-                       drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-                       drbd_may_finish_epoch(mdev, epoch, EV_PUT);
-               }
-       }
-
        return rv;
 }
 
@@ -1144,19 +1068,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
-               [WO_bio_barrier] = "barrier",
        };
 
        pwo = mdev->write_ordering;
        wo = min(pwo, wo);
-       if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
-               wo = WO_bdev_flush;
        if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
                wo = WO_drain_io;
        if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
                wo = WO_none;
        mdev->write_ordering = wo;
-       if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+       if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
                dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
 }
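
The cascade above only works if write_ordering_e is declared weakest-first, so that min(pwo, wo) can never strengthen the method beyond what the configuration allows. Restated as a sketch (enumerator order inferred from the degradation chain and the /proc table earlier in this diff):

	enum write_ordering_e {
		WO_none,	/* 'n': no ordering effort */
		WO_drain_io,	/* 'd': drain in-flight IO between epochs */
		WO_bdev_flush,	/* 'f': explicit cache flush to the backing device */
	};
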
 
@@ -1192,7 +1113,7 @@ next_bio:
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        /* we special case some flags in the multi-bio case, see below
-        * (REQ_UNPLUG, REQ_HARDBARRIER) */
+        * (REQ_UNPLUG) */
        bio->bi_rw = rw;
        bio->bi_private = e;
        bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@ next_bio:
                        bio->bi_rw &= ~REQ_UNPLUG;
 
                drbd_generic_make_request(mdev, fault_type, bio);
-
-               /* strip off REQ_HARDBARRIER,
-                * unless it is the first or last bio */
-               if (bios && bios->bi_next)
-                       bios->bi_rw &= ~REQ_HARDBARRIER;
        } while (bios);
        maybe_kick_lo(mdev);
        return 0;
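
The stripped REQ_HARDBARRIER handling reflects the block-layer change of this cycle: hard barriers are gone, and ordering is requested per-bio instead. A minimal sketch of the replacement, assuming the 2.6.37-era flag names:

	#include <linux/bio.h>

	/* Sketch: ask for a preflush plus forced-unit-access write instead
	 * of the removed hard barrier; the block layer no longer drains
	 * the whole queue on our behalf. */
	static void mark_bio_ordered(struct bio *bio)
	{
		bio->bi_rw |= REQ_FLUSH | REQ_FUA;
	}
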
@@ -1244,45 +1160,9 @@ fail:
        return -ENOMEM;
 }
 
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev:      DRBD device.
- * @w:         work object.
- * @cancel:    The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
-       struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-       /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
-          (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
-          so that we can finish that epoch in drbd_may_finish_epoch().
-          That is necessary if we already have a long chain of Epochs, before
-          we realize that REQ_HARDBARRIER is actually not supported */
-
-       /* As long as the -ENOTSUPP on the barrier is reported immediately
-          that will never trigger. If it is reported late, we will just
-          print that warning and continue correctly for all future requests
-          with WO_bdev_flush */
-       if (previous_epoch(mdev, e->epoch))
-               dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
-       /* we still have a local reference,
-        * get_ldev was done in receive_Data. */
-
-       e->w.cb = e_end_block;
-       if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
-               /* drbd_submit_ee fails for one reason only:
-                * if was not able to allocate sufficient bios.
-                * requeue, try again later. */
-               e->w.cb = w_e_reissue;
-               drbd_queue_work(&mdev->data.work, &e->w);
-       }
-       return 1;
-}
-
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       int rv, issue_flush;
+       int rv;
        struct p_barrier *p = &mdev->data.rbuf.barrier;
        struct drbd_epoch *epoch;
 
@@ -1300,44 +1180,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
         * Therefore we must send the barrier_ack after the barrier request was
         * completed. */
        switch (mdev->write_ordering) {
-       case WO_bio_barrier:
        case WO_none:
                if (rv == FE_RECYCLED)
                        return TRUE;
-               break;
+
+               /* receiver context, in the writeout path of the other node.
+                * avoid potential distributed deadlock */
+               epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+               if (epoch)
+                       break;
+               else
+                       dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+                       /* Fall through */
 
        case WO_bdev_flush:
        case WO_drain_io:
-               if (rv == FE_STILL_LIVE) {
-                       set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-                       drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-                       rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-               }
-               if (rv == FE_RECYCLED)
-                       return TRUE;
-
-               /* The asender will send all the ACKs and barrier ACKs out, since
-                  all EEs moved from the active_ee to the done_ee. We need to
-                  provide a new epoch object for the EEs that come in soon */
-               break;
-       }
-
-       /* receiver context, in the writeout path of the other node.
-        * avoid potential distributed deadlock */
-       epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
-       if (!epoch) {
-               dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-               issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
                drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-               if (issue_flush) {
-                       rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-                       if (rv == FE_RECYCLED)
-                               return TRUE;
+               drbd_flush(mdev);
+
+               if (atomic_read(&mdev->current_epoch->epoch_size)) {
+                       epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+                       if (epoch)
+                               break;
                }
 
-               drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+               epoch = mdev->current_epoch;
+               wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+               D_ASSERT(atomic_read(&epoch->active) == 0);
+               D_ASSERT(epoch->flags == 0);
 
                return TRUE;
+       default:
+               dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+               return FALSE;
        }
 
        epoch->flags = 0;
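
The allocation fallback in receive_Barrier() above deserves a second look: rather than letting a failed GFP_NOIO allocation stall the receiver, the current epoch itself is drained and recycled. Condensed into a sketch (the helper name is illustrative):

	static struct drbd_epoch *next_epoch_sketch(struct drbd_conf *mdev)
	{
		struct drbd_epoch *epoch = kmalloc(sizeof(*epoch), GFP_NOIO);

		if (epoch)
			return epoch;
		/* allocation failed: wait until the current epoch has
		 * emptied, then carry on with it instead of a fresh object */
		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
		return epoch;
	}
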
@@ -1652,15 +1528,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
        struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
        sector_t sector = e->sector;
-       struct drbd_epoch *epoch;
        int ok = 1, pcmd;
 
-       if (e->flags & EE_IS_BARRIER) {
-               epoch = previous_epoch(mdev, e->epoch);
-               if (epoch)
-                       drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
-       }
-
        if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
                if (likely((e->flags & EE_WAS_ERROR) == 0)) {
                        pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        e->epoch = mdev->current_epoch;
        atomic_inc(&e->epoch->epoch_size);
        atomic_inc(&e->epoch->active);
-
-       if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
-               struct drbd_epoch *epoch;
-               /* Issue a barrier if we start a new epoch, and the previous epoch
-                  was not a epoch containing a single request which already was
-                  a Barrier. */
-               epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
-               if (epoch == e->epoch) {
-                       set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                       rw |= REQ_HARDBARRIER;
-                       e->flags |= EE_IS_BARRIER;
-               } else {
-                       if (atomic_read(&epoch->epoch_size) > 1 ||
-                           !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
-                               set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-                               set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                               rw |= REQ_HARDBARRIER;
-                               e->flags |= EE_IS_BARRIER;
-                       }
-               }
-       }
        spin_unlock(&mdev->epoch_lock);
 
        dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                break;
        }
 
-       if (mdev->state.pdsk == D_DISKLESS) {
+       if (mdev->state.pdsk < D_INCONSISTENT) {
                /* In case we have the only disk of the cluster, */
                drbd_set_out_of_sync(mdev, e->sector, e->size);
                e->flags |= EE_CALL_AL_COMPLETE_IO;
+               e->flags &= ~EE_MAY_SET_IN_SYNC;
                drbd_al_begin_io(mdev, e->sector);
        }
 
@@ -3362,7 +3211,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                if (ns.conn == C_MASK) {
                        ns.conn = C_CONNECTED;
                        if (mdev->state.disk == D_NEGOTIATING) {
-                               drbd_force_state(mdev, NS(disk, D_DISKLESS));
+                               drbd_force_state(mdev, NS(disk, D_FAILED));
                        } else if (peer_state.disk == D_NEGOTIATING) {
                                dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
                                peer_state.disk = D_DISKLESS;
index 9e91a2545fc869273d39ca9737904e56d5f203b9..11a75d32a2e27f0d78c26b511921780f4d336264 100644 (file)
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
                if (!hlist_unhashed(&req->colision))
                        hlist_del(&req->colision);
                else
-                       D_ASSERT((s & RQ_NET_MASK) == 0);
+                       D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
                /* for writes we need to do some extra housekeeping */
                if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
                             mdev->state.conn >= C_CONNECTED));
 
        if (!(local || remote) && !is_susp(mdev->state)) {
-               dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+               if (__ratelimit(&drbd_ratelimit_state))
+                       dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
                goto fail_free_complete;
        }
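
Wrapping the error message in __ratelimit() matters because this path can fire once per incoming bio. A sketch of the idiom, assuming the stock lib/ratelimit API (the state name here is illustrative; DRBD keeps its own drbd_ratelimit_state):

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 10); /* 10 msgs / 5 s */

	static void report_io_error(void)
	{
		if (__ratelimit(&example_rs))
			pr_err("IO ERROR: neither local nor remote disk\n");
	}
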
 
@@ -942,12 +943,21 @@ allocate_barrier:
        if (local) {
                req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
 
-               if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-                                    : rw == READ  ? DRBD_FAULT_DT_RD
-                                    :               DRBD_FAULT_DT_RA))
+               /* State may have changed since we grabbed our reference on the
+                * mdev->ldev member. Double check, and short-circuit to endio.
+                * In case the last activity log transaction failed to get on
+                * stable storage, and this is a WRITE, we may not even submit
+                * this bio. */
+               if (get_ldev(mdev)) {
+                       if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+                                            : rw == READ  ? DRBD_FAULT_DT_RD
+                                            :               DRBD_FAULT_DT_RA))
+                               bio_endio(req->private_bio, -EIO);
+                       else
+                               generic_make_request(req->private_bio);
+                       put_ldev(mdev);
+               } else
                        bio_endio(req->private_bio, -EIO);
-               else
-                       generic_make_request(req->private_bio);
        }
 
        /* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
                return 0;
        }
 
-       /* Reject barrier requests if we know the underlying device does
-        * not support them.
-        * XXX: Need to get this info from peer as well some how so we
-        * XXX: reject if EITHER side/data/metadata area does not support them.
-        *
-        * because of those XXX, this is not yet enabled,
-        * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-        */
-       if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-               /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-               bio_endio(bio, -EOPNOTSUPP);
-               return 0;
-       }
-
        /*
         * what we "blindly" assume:
         */
index 108d58015cd119a6e85873095acab12ab011a6cc..b0551ba7ad0c9355a36621d3a6405ba76a8ab9d0 100644 (file)
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
        put_ldev(mdev);
 }
 
-static int is_failed_barrier(int ee_flags)
-{
-       return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
-                       == (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage.  */
 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
        int is_syncer_req;
        int do_al_complete_io;
 
-       /* if this is a failed barrier request, disable use of barriers,
-        * and schedule for resubmission */
-       if (is_failed_barrier(e->flags)) {
-               drbd_bump_write_ordering(mdev, WO_bdev_flush);
-               spin_lock_irqsave(&mdev->req_lock, flags);
-               list_del(&e->w.list);
-               e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
-               e->w.cb = w_e_reissue;
-               /* put_ldev actually happens below, once we come here again. */
-               __release(local);
-               spin_unlock_irqrestore(&mdev->req_lock, flags);
-               drbd_queue_work(&mdev->data.work, &e->w);
-               return;
-       }
-
        D_ASSERT(e->block_id != ID_VACANT);
 
        /* after we moved e to done_ee,
@@ -925,7 +904,7 @@ out:
        drbd_md_sync(mdev);
 
        if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-               dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+               dev_info(DEV, "Writing the whole bitmap\n");
                drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
        }
 
index 1e5284ef65fa47313d4a6609526b6dfecf00615e..7ea0bea2f7e3f7a0ecc2a2d4b5df15a9b5a7cccc 100644 (file)
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        if (bio_rw(bio) == WRITE) {
                struct file *file = lo->lo_backing_file;
 
-               /* REQ_HARDBARRIER is deprecated */
-               if (bio->bi_rw & REQ_HARDBARRIER) {
-                       ret = -EOPNOTSUPP;
-                       goto out;
-               }
-
                if (bio->bi_rw & REQ_FLUSH) {
                        ret = vfs_fsync(file, 0);
                        if (unlikely(ret && ret != -EINVAL)) {
index 06e2812ba12405dc0a63eea531943b53c4aa067b..255035cfc88ab8ff3a279578487facd6d17a113f 100644 (file)
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
 
        ring_req->operation = rq_data_dir(req) ?
                BLKIF_OP_WRITE : BLKIF_OP_READ;
-       if (req->cmd_flags & REQ_HARDBARRIER)
-               ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
        ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
        BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
index 6b6760ea2435bac1865325aec74d579f9687f87e..9272c38dd3c6ce8a66e9e0d2b0a5085ddd2973a7 100644 (file)
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
        unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
        u32 pte_flags;
 
-       if (type_mask == AGP_USER_UNCACHED_MEMORY)
+       if (type_mask == AGP_USER_MEMORY)
                pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
        else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
                if (gfdt)
                        pte_flags |= GEN6_PTE_GFDT;
        } else { /* set 'normal'/'cached' to LLC by default */
-               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
                if (gfdt)
                        pte_flags |= GEN6_PTE_GFDT;
        }
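
The gen6 PTE fix above swaps two mismatched pairs: the uncached case was keyed off the wrong type constant, and the LLC and LLC+MLC flag values were crossed. The corrected mapping, restated as a switch for clarity (a sketch; the real code keeps the if/else chain and may also OR in GEN6_PTE_GFDT):

	switch (type_mask) {
	case AGP_USER_MEMORY:
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
		break;
	case AGP_USER_CACHED_MEMORY_LLC_MLC:
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		break;
	default:	/* 'normal'/'cached' maps to LLC */
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		break;
	}
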
index b0a70461a12c9c19269b8fe6c443251960c131b1..c0bd6f472c523a377f4d6efef8d90f8f6fbfe373 100644 (file)
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 {
        struct async_struct * info = tty->driver_data;
        struct async_icount cprev, cnow;        /* kernel counter temps */
-       struct serial_icounter_struct icount;
        void __user *argp = (void __user *)arg;
        unsigned long flags;
 
index dd3f9b1f11b4f59685853873977b3913daf3b0a2..294d03e8c61a6a249b6008878efd08386c86d0a9 100644 (file)
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
                      unsigned int cmd, unsigned long arg)
 {
        struct port *port = tty->driver_data;
-       void __user *argp = (void __user *)arg;
        int rval = -ENOIOCTLCMD;
 
        DBG1("******** IOCTL, cmd: %d", cmd);
index bfc10f89d9511d8f26367329c5b7658e8a6b526c..eaa41992fbe234a17bd1d102e05fd39229e0ac5d 100644 (file)
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
        .hangup = mgslpc_hangup,
        .tiocmget = tiocmget,
        .tiocmset = tiocmset,
+       .get_icount = mgslpc_get_icount,
        .proc_fops = &mgslpc_proc_fops,
 };
 
index dcbeb98f195a7addf665e0134af9ee799280a279..f7af91cb273d58232d41c68b712b2d6d4404671c 100644 (file)
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
        struct drm_crtc *tmp;
        int crtc_mask = 1;
 
-       WARN(!crtc, "checking null crtc?");
+       WARN(!crtc, "checking null crtc?\n");
 
        dev = crtc->dev;
 
index c1a26217a5305ee65a07b85fa77e4c15db77436b..a245d17165ae28b4eab835d612c92e008e3ab5d4 100644 (file)
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
                        .addr   = DDC_ADDR,
                        .flags  = I2C_M_RD,
                        .len    = len,
-                       .buf    = buf + start,
+                       .buf    = buf,
                }
        };
 
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-       int i, j = 0;
+       int i, j = 0, valid_extensions = 0;
        u8 *block, *new;
 
        if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
        for (j = 1; j <= block[0x7e]; j++) {
                for (i = 0; i < 4; i++) {
-                       if (drm_do_probe_ddc_edid(adapter, block, j,
-                                                 EDID_LENGTH))
+                       if (drm_do_probe_ddc_edid(adapter,
+                                 block + (valid_extensions + 1) * EDID_LENGTH,
+                                 j, EDID_LENGTH))
                                goto out;
-                       if (drm_edid_block_valid(block + j * EDID_LENGTH))
+                       if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+                               valid_extensions++;
                                break;
+                       }
                }
                if (i == 4)
-                       goto carp;
+                       dev_warn(connector->dev->dev,
+                        "%s: Ignoring invalid EDID block %d.\n",
+                        drm_get_connector_name(connector), j);
+       }
+
+       if (valid_extensions != block[0x7e]) {
+               block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+               block[0x7e] = valid_extensions;
+               new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+               if (!new)
+                       goto out;
+               block = new;
        }
 
        return block;
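
The checksum adjustment above is the subtle part of this hunk: every 128-byte EDID block must sum to 0 mod 256, with the final byte acting as checksum, so lowering the extension count at offset 0x7e has to be compensated within the same block:

	u8 old_count = block[0x7e];

	/* keep sum(bytes) % 256 == 0 after lowering the extension count */
	block[EDID_LENGTH - 1] += old_count - valid_extensions;
	block[0x7e] = valid_extensions;
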
index 3467dd420760fa51084a2ba00d097b90d6c214a2..80745f85902cf73fe17bf2ec41863dcb36a38550 100644 (file)
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
index 2c2c19b6285ecf331edbac9c53e0bdd44b2093ad..90414ae86afcd0758d49b2c70dfb39f613ab22f5 100644 (file)
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
index 8eb8453208b5ce491e77c1c06c77c1c0a2da41bc..ef188e391406a1b3a2763e92d578c8f9e03b28b3 100644 (file)
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
                          struct intel_ring_buffer *ring)
 {
-       if (list_empty(&ring->gpu_write_list))
+       if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
                return 0;
 
        i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
        int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list));
+                      list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return 0;
 
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         * write domain
         */
        if (obj->write_domain &&
-           obj->write_domain != obj->pending_read_domains) {
+           (obj->write_domain != obj->pending_read_domains ||
+            obj_priv->ring != ring)) {
                flush_domains |= obj->write_domain;
                invalidate_domains |=
                        obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
        return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+                               struct drm_file *file,
+                               struct intel_ring_buffer *ring,
+                               struct drm_gem_object **objects,
+                               int count)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret, i;
+
+       /* Zero the global flush/invalidate flags. These
+        * will be modified as new domains are computed
+        * for each object
+        */
+       dev->invalidate_domains = 0;
+       dev->flush_domains = 0;
+       dev_priv->mm.flush_rings = 0;
+       for (i = 0; i < count; i++)
+               i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+       if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+                         __func__,
+                        dev->invalidate_domains,
+                        dev->flush_domains);
+#endif
+               i915_gem_flush(dev, file,
+                              dev->invalidate_domains,
+                              dev->flush_domains,
+                              dev_priv->mm.flush_rings);
+       }
+
+       for (i = 0; i < count; i++) {
+               struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+               /* XXX replace with semaphores */
+               if (obj->ring && ring != obj->ring) {
+                       ret = i915_gem_object_wait_rendering(&obj->base, true);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
 
-       /* Zero the global flush/invalidate flags. These
-        * will be modified as new domains are computed
-        * for each object
-        */
-       dev->invalidate_domains = 0;
-       dev->flush_domains = 0;
-       dev_priv->mm.flush_rings = 0;
-
-       for (i = 0; i < args->buffer_count; i++) {
-               struct drm_gem_object *obj = object_list[i];
-
-               /* Compute new gpu domains and update invalidate/flush */
-               i915_gem_object_set_to_gpu_domain(obj, ring);
-       }
-
-       if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                        dev->invalidate_domains,
-                        dev->flush_domains);
-#endif
-               i915_gem_flush(dev, file,
-                              dev->invalidate_domains,
-                              dev->flush_domains,
-                              dev_priv->mm.flush_rings);
-       }
+       ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+                                             object_list, args->buffer_count);
+       if (ret)
+               goto err;
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                        alignment = i915_gem_get_gtt_alignment(obj);
                if (obj_priv->gtt_offset & (alignment - 1)) {
                        WARN(obj_priv->pin_count,
-                            "bo is already pinned with incorrect alignment:"
-                            " offset=%x, req.alignment=%x\n",
+                            "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
                             obj_priv->gtt_offset, alignment);
                        ret = i915_gem_object_unbind(obj);
                        if (ret)
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                     struct drm_file *file_priv)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       void *obj_addr;
-       int ret;
-       char __user *user_data;
+       void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+       char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-       user_data = (char __user *) (uintptr_t) args->data_ptr;
-       obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+       DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-       DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-       ret = copy_from_user(obj_addr, user_data, args->size);
-       if (ret)
-               return -EFAULT;
+       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+               unsigned long unwritten;
+
+               /* The physical object once assigned is fixed for the lifetime
+                * of the obj, so we can safely drop the lock and continue
+                * to access vaddr.
+                */
+               mutex_unlock(&dev->struct_mutex);
+               unwritten = copy_from_user(vaddr, user_data, args->size);
+               mutex_lock(&dev->struct_mutex);
+               if (unwritten)
+                       return -EFAULT;
+       }
 
        drm_agp_chipset_flush(dev);
        return 0;
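
The pwrite rework above is a fast-path/slow-path split: the atomic, non-faulting copy is tried under the mutex, and only on a fault does the code drop the lock and take the sleeping copy. As a generic sketch (assuming, as the comment in the hunk argues, that the lock only guards object lifetime and the destination mapping is stable):

	static int copy_user_two_stage(void *dst, const void __user *src,
				       size_t len, struct mutex *lock)
	{
		if (__copy_from_user_inatomic_nocache(dst, src, len)) {
			unsigned long unwritten;

			mutex_unlock(lock);		/* may fault and sleep now */
			unwritten = copy_from_user(dst, src, len);
			mutex_lock(lock);
			if (unwritten)
				return -EFAULT;
		}
		return 0;
	}
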
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
        int lists_empty;
 
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->render_ring.active_list) &&
-                     list_empty(&dev_priv->bsd_ring.active_list) &&
-                     list_empty(&dev_priv->blt_ring.active_list);
+                     list_empty(&dev_priv->mm.active_list);
 
        return !lists_empty;
 }
index 43a4013f53fa24e212849af68940384b777d30a1..d8ae7d1d0cc671d9d59393fd2d11ddff317b322f 100644 (file)
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list));
+                      list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list));
+                      list_empty(&dev_priv->mm.active_list));
        BUG_ON(!lists_empty);
 
        return 0;
index 989c19d2d959b6bc6c54a0e4bf31d9aee093efe6..454c064f8ef7f8d0daffe0ed40394354b152ee55 100644 (file)
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
        /* Clock gating state */
        intel_init_clock_gating(dev);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (HAS_PCH_SPLIT(dev)) {
                ironlake_enable_drps(dev);
+               intel_init_emon(dev);
+       }
 
        /* Cache mode state */
        I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
index 990f065374b22eacfc2fcddd68c873a5081ee773..48d8fd686ea91d9fb3ba307036ecd0f5388174e9 100644 (file)
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
        udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       u32 reg, temp;
+
+       /* enable normal train */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_NONE;
+       }
+       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+       /* wait one idle pattern time */
+       POSTING_READ(reg);
+       udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
        DRM_DEBUG_KMS("FDI train done\n");
 
-       /* enable normal train */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-       I915_WRITE(reg, temp);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (HAS_PCH_CPT(dev)) {
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_NONE;
-       }
-       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-       /* wait one idle pattern time */
-       POSTING_READ(reg);
-       udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
        I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 
+       intel_fdi_normal_train(crtc);
+
        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev) &&
            intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        udelay(100);
 
        /* Ironlake workaround, disable clock pointer after downing FDI */
-       I915_WRITE(FDI_RX_CHICKEN(pipe),
-                  I915_READ(FDI_RX_CHICKEN(pipe) &
-                            ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+       if (HAS_PCH_IBX(dev))
+               I915_WRITE(FDI_RX_CHICKEN(pipe),
+                          I915_READ(FDI_RX_CHICKEN(pipe) &
+                                    ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
        /* still set train pattern 1 */
        reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
        fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;
-       fstart = fmax;
 
        vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
                PXVFREQ_PX_SHIFT;
 
-       dev_priv->fmax = fstart; /* IPS callback will increase this */
+       dev_priv->fmax = fmax; /* IPS callback will increase this */
        dev_priv->fstart = fstart;
 
-       dev_priv->max_delay = fmax;
+       dev_priv->max_delay = fstart;
        dev_priv->min_delay = fmin;
        dev_priv->cur_delay = fstart;
 
-       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-                        fstart);
+       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+                        fmax, fmin, fstart);
 
        I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
index 891f4f1d63b11570b7ede1c4e5e49814f2dcb5d0..c8e005553310a5eaeae98e4cb8ed349b4b204164 100644 (file)
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                        status = connector_status_connected;
        }
 
-       return bit;
+       return status;
 }
 
 /**
index 9af9f86a8765c82833706aa1320ca9baed3fa416..21551fe745416abb4597341b18d647f2529e85ce 100644 (file)
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                                      struct drm_gem_object *obj,
index f1a649990ea9e61d5f7ff6f8b15e127d6fcc2f17..4324a326f98ee28f4d67936b7a4789556344c4fb 100644 (file)
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode;
 
-       if (intel_lvds->edid) {
-               drm_mode_connector_update_edid_property(connector,
-                                                       intel_lvds->edid);
+       if (intel_lvds->edid)
                return drm_add_edid_modes(connector, intel_lvds->edid);
-       }
 
        mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
        if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
         */
        intel_lvds->edid = drm_get_edid(connector,
                                        &dev_priv->gmbus[pin].adapter);
-
+       if (intel_lvds->edid) {
+               if (drm_add_edid_modes(connector,
+                                      intel_lvds->edid)) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                               intel_lvds->edid);
+               } else {
+                       kfree(intel_lvds->edid);
+                       intel_lvds->edid = NULL;
+               }
+       }
        if (!intel_lvds->edid) {
                /* Didn't get an EDID, so
                 * Set wide sync ranges so we get all modes
index 917c7dc3cd6b33ee151ad2e960743fe35016cec0..9b0d9a867aeada1f1c9d601e21f76e2d9e4e7351 100644 (file)
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
        return 0;
 
 err_out:
-       iounmap(opregion->header);
+       iounmap(base);
        return err;
 }
index afb96d25219afe473b116802f30616c0edb1cf86..02ff0a481f470cea6e3f4f109805e07fbd84725e 100644 (file)
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
        int uv_hscale = uv_hsubsampling(rec->flags);
        int uv_vscale = uv_vsubsampling(rec->flags);
-       u32 stride_mask, depth, tmp;
+       u32 stride_mask;
+       int depth;
+       u32 tmp;
 
        /* check src dimensions */
        if (IS_845G(dev) || IS_I830(dev)) {
index 09f2dc353ae239f0d2a6f86a5940c1d7b27656e4..b83306f9244b6c887d84976b4f1801449445941a 100644 (file)
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
        I915_WRITE_CTL(ring,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-                       | RING_NO_REPORT | RING_VALID);
+                       | RING_REPORT_64K | RING_VALID);
 
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
+
+       if (ring->cleanup)
+               ring->cleanup(ring);
+
        cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
        unsigned long end;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 head;
+
+       head = intel_read_status_page(ring, 4);
+       if (head) {
+               ring->head = head & HEAD_ADDR;
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n)
+                       return 0;
+       }
 
        trace_i915_ring_wait_begin (dev);
        end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
        /* do nothing */
 }
 
+
+/* Workaround for some steppings of SNB:
+ * each time the BLT engine ring tail is moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START.
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+       (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+       return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+                        struct intel_ring_buffer *ring)
+{
+       if (NEED_BLT_WORKAROUND(dev)) {
+               struct drm_i915_gem_object *obj;
+               u32 __iomem *ptr;
+               int ret;
+
+               obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+               if (obj == NULL)
+                       return -ENOMEM;
+
+               ret = i915_gem_object_pin(&obj->base, 4096);
+               if (ret) {
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ptr = kmap(obj->pages[0]);
+               iowrite32(MI_BATCH_BUFFER_END, ptr);
+               iowrite32(MI_NOOP, ptr+1);
+               kunmap(obj->pages[0]);
+
+               ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+               if (ret) {
+                       i915_gem_object_unpin(&obj->base);
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ring->private = obj;
+       }
+
+       return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+                          struct intel_ring_buffer *ring,
+                         int num_dwords)
+{
+       if (ring->private) {
+               intel_ring_begin(dev, ring, num_dwords+2);
+               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+               intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+       } else
+               intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+                          struct intel_ring_buffer *ring,
+                          u32 invalidate_domains,
+                          u32 flush_domains)
+{
+       blt_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_FLUSH_DW);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+                    struct intel_ring_buffer *ring,
+                    u32 flush_domains)
+{
+       u32 seqno = i915_gem_get_seqno(dev);
+
+       blt_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(dev, ring,
+                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(dev, ring, seqno);
+       intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+       intel_ring_advance(dev, ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+       return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+       if (!ring->private)
+               return;
+
+       i915_gem_object_unpin(ring->private);
+       drm_gem_object_unreference(ring->private);
+       ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name                   = "blt ring",
        .id                     = RING_BLT,
        .mmio_base              = BLT_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
+       .init                   = blt_ring_init,
        .write_tail             = ring_write_tail,
-       .flush                  = gen6_ring_flush,
-       .add_request            = ring_add_request,
+       .flush                  = blt_ring_flush,
+       .add_request            = blt_ring_add_request,
        .get_seqno              = ring_status_page_get_seqno,
        .user_irq_get           = blt_ring_get_user_irq,
        .user_irq_put           = blt_ring_put_user_irq,
        .dispatch_gem_execbuffer        = gen6_ring_dispatch_gem_execbuffer,
+       .cleanup                        = blt_ring_cleanup,
 };
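
Putting the pieces of the SNB BLT workaround together: blt_ring_init() pins one page holding a batch that ends immediately, and blt_ring_begin() prefixes every emission with a batch start pointing at it, so the first parsed command after any tail move is always MI_BATCH_BUFFER_START. Sketched layout:

	/*
	 *   ring: [MI_BATCH_BUFFER_START][page gtt_offset][caller dwords ...]
	 *   page: [MI_BATCH_BUFFER_END][MI_NOOP]            (pinned, 4 KiB)
	 */
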
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
index a05aff0e5764d67e421a5a4995f2b0b92dfcd925..3126c2681983e21ba729d9ca599d104f4f1323de 100644 (file)
@@ -63,6 +63,7 @@ struct  intel_ring_buffer {
                        struct drm_i915_gem_execbuffer2 *exec,
                        struct drm_clip_rect *cliprects,
                        uint64_t exec_offset);
+       void            (*cleanup)(struct intel_ring_buffer *ring);
 
        /**
         * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct  intel_ring_buffer {
 
        wait_queue_head_t irq_queue;
        drm_local_map_t map;
+
+       void *private;
 };
 
 static inline u32
index f12a5b3ec050320505c6dc0b1cb44dcdde3fbeed..488c36c8f5e6069ebbb6cf5f9b830747a61c062e 100644 (file)
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
        u32 grbm_int_cntl = 0;
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                return -EINVAL;
        }
        /* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
                        case 0: /* D1 vblank */
                                if (disp_int & LB_D1_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
                        case 0: /* D2 vblank */
                                if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
                        case 0: /* D3 vblank */
                                if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
                        case 0: /* D4 vblank */
                                if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
                        case 0: /* D5 vblank */
                                if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
                        case 0: /* D6 vblank */
                                if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D6 vblank\n");
index 0e8f28a689271c88a88d3865ead4890089049dca..8e10aa9f74b052a25e86d04e65173c8a7a19eb76 100644 (file)
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.ram.ptr) {
-               WARN(1, "R100 PCI GART already initialized.\n");
+               WARN(1, "R100 PCI GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
        uint32_t tmp = 0;
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
index 34527e600fe94f91ab33da78285b695afbb6cd6e..cde1d3480d932c55ae33855c3a2336eadfe9ab54 100644 (file)
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.vram.robj) {
-               WARN(1, "RV370 PCIE GART already initialized.\n");
+               WARN(1, "RV370 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
index 33952a12f0a31a49986a248a39c38cb48cdf618c..0f806cc7dc75f02f5bb00f98fc0d65b662aad688 100644 (file)
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
 {
        u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
                ASIC_T_SHIFT;
-       u32 actual_temp = 0;
 
-       if ((temp >> 7) & 1)
-               actual_temp = 0;
-       else
-               actual_temp = (temp >> 1) & 0xff;
-
-       return actual_temp * 1000;
+       return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.vram.robj) {
-               WARN(1, "R600 PCIE GART already initialized.\n");
+               WARN(1, "R600 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 hdmi1, hdmi2;
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                return -EINVAL;
        }
        /* don't enable anything if the ih is disabled */
index 04cac7ec90397fa7f4f46f9d966fb41b2d3c5368..87ead090c7d5a5195af6cf8d15be5a5809ff9678 100644 (file)
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
        if (crev < 2)
                return false;
 
-       router.valid = false;
-
        obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
        path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
            (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
                                continue;
 
+                       router.ddc_valid = false;
+                       router.cd_valid = false;
                        for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
                                uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                                 usDeviceTag));
 
                                } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-                                       router.valid = false;
                                        for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-                                               u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+                                               u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
                                                if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
                                                        ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
                                                                (ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                        ATOM_I2C_RECORD *i2c_record;
                                                        ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
                                                        ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+                                                       ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
                                                        ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
                                                                (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
                                                                (ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                                case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
                                                                        ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
                                                                                record;
-                                                                       router.valid = true;
-                                                                       router.mux_type = ddc_path->ucMuxType;
-                                                                       router.mux_control_pin = ddc_path->ucMuxControlPin;
-                                                                       router.mux_state = ddc_path->ucMuxState[enum_id];
+                                                                       router.ddc_valid = true;
+                                                                       router.ddc_mux_type = ddc_path->ucMuxType;
+                                                                       router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+                                                                       router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+                                                                       break;
+                                                               case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+                                                                       cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+                                                                               record;
+                                                                       router.cd_valid = true;
+                                                                       router.cd_mux_type = cd_path->ucMuxType;
+                                                                       router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+                                                                       router.cd_mux_state = cd_path->ucMuxState[enum_id];
                                                                        break;
                                                                }
                                                                record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
        size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
        struct radeon_router router;
 
-       router.valid = false;
+       router.ddc_valid = false;
+       router.cd_valid = false;
 
        bios_connectors = kzalloc(bc_size, GFP_KERNEL);
        if (!bios_connectors)
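Beyond splitting the router bookkeeping into separate DDC and clock/data (cd) mux state, this file fixes two subtle bugs: the valid flags are now reset at the top of each connector iteration, so router data found for one connector can no longer leak into the next, and the router-object scan now indexes asObjects[k] with its own loop counter instead of the enclosing loop's j. The latter is the classic nested-loop index slip; stripped to a standalone illustration (tables invented for the example):

    #include <stdio.h>

    int main(void)
    {
        int path_ids[3]   = { 10, 20, 30 };   /* outer table, indexed by j */
        int object_ids[3] = { 30, 20, 10 };   /* inner table, indexed by k */

        for (int j = 0; j < 3; j++)
            for (int k = 0; k < 3; k++)
                /* The buggy form compared object_ids[j]: it never advances
                 * with the inner loop, so most matches are missed;
                 * object_ids[k] is what was meant. */
                if (path_ids[j] == object_ids[k])
                    printf("path %d matches object slot %d\n",
                           path_ids[j], k);
        return 0;
    }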
index 4dac4b0a02eea1928837c7009f88e84f227a81f5..fe6c74780f18e67bb55ea5fdeefdbb038ffadbf1 100644 (file)
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
                                        continue;
 
                                if (priority == true) {
-                                       DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-                                       DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+                                       DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+                                       DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
                                        conflict->status = connector_status_disconnected;
                                        radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
                                } else {
-                                       DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-                                       DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+                                       DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+                                       DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
                                        current_status = connector_status_disconnected;
                                }
                                break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
                            mode->vdisplay == native_mode->vdisplay) {
                                *native_mode = *mode;
                                drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-                               DRM_INFO("Determined LVDS native mode details from EDID\n");
+                               DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
                                break;
                        }
                }
        }
        if (!native_mode->clock) {
-               DRM_INFO("No LVDS native mode details, disabling RMX\n");
+               DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
                radeon_encoder->rmx_type = RMX_OFF;
        }
 }
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                radeon_connector->shared_ddc = true;
                                shared_ddc = true;
                        }
-                       if (radeon_connector->router_bus && router->valid &&
+                       if (radeon_connector->router_bus && router->ddc_valid &&
                            (radeon_connector->router.router_id == router->router_id)) {
                                radeon_connector->shared_ddc = false;
                                shared_ddc = false;
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
        radeon_connector->connector_object_id = connector_object_id;
        radeon_connector->hpd = *hpd;
        radeon_connector->router = *router;
-       if (router->valid) {
+       if (router->ddc_valid || router->cd_valid) {
                radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
                if (!radeon_connector->router_bus)
                        goto failed;
index 0383631da69c42db417f27f6b87ddf58f19eb24e..1df4dc6c063cc44e0e7e943da9b62e6c85d27d38 100644 (file)
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
                                 radeon_connector->ddc_bus->rec.en_data_reg,
                                 radeon_connector->ddc_bus->rec.y_clk_reg,
                                 radeon_connector->ddc_bus->rec.y_data_reg);
-                       if (radeon_connector->router_bus)
+                       if (radeon_connector->router.ddc_valid)
                                DRM_INFO("  DDC Router 0x%x/0x%x\n",
-                                        radeon_connector->router.mux_control_pin,
-                                        radeon_connector->router.mux_state);
+                                        radeon_connector->router.ddc_mux_control_pin,
+                                        radeon_connector->router.ddc_mux_state);
+                       if (radeon_connector->router.cd_valid)
+                               DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+                                        radeon_connector->router.cd_mux_control_pin,
+                                        radeon_connector->router.cd_mux_state);
                } else {
                        if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
                            connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
        int ret = 0;
 
        /* on hw with routers, select right port */
-       if (radeon_connector->router.valid)
-               radeon_router_select_port(radeon_connector);
+       if (radeon_connector->router.ddc_valid)
+               radeon_router_select_ddc_port(radeon_connector);
 
        if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
            (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
        int ret = 0;
 
        /* on hw with routers, select right port */
-       if (radeon_connector->router.valid)
-               radeon_router_select_port(radeon_connector);
+       if (radeon_connector->router.ddc_valid)
+               radeon_router_select_ddc_port(radeon_connector);
 
        if (!radeon_connector->ddc_bus)
                return -1;
index ae58b6849a2eaa4fd259b88f3d43d2497733161b..f678257c42e6ab54f668951b93e685cf04d287d5 100644 (file)
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
        if (radeon_encoder->active_device &
            (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
        radeon_atom_output_lock(encoder, true);
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+       /* select the clock/data port if it uses a router */
+       if (connector) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               if (radeon_connector->router.cd_valid)
+                       radeon_router_select_cd_port(radeon_connector);
+       }
+
        /* this is needed for the pll/ss setup to work correctly in some cases */
        atombios_set_encoder_crtc_source(encoder);
 }
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
+
+       /* check for pre-DCE3 cards with shared encoders;
+        * can't really use the links individually, so don't disable
+        * the encoder if it's in use by another connector
+        */
+       if (!ASIC_IS_DCE3(rdev)) {
+               struct drm_encoder *other_encoder;
+               struct radeon_encoder *other_radeon_encoder;
+
+               list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+                       other_radeon_encoder = to_radeon_encoder(other_encoder);
+                       if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+                           drm_helper_encoder_in_use(other_encoder))
+                               goto disable_done;
+               }
+       }
+
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
        switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
                break;
        }
 
+disable_done:
        if (radeon_encoder_is_digital(encoder)) {
                if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
                        r600_hdmi_disable(encoder);
index 216392d0353bcace87cd2a80d06dda358e285ee4..daacb281dfafea57f26ac258dfbdca3c6d0c0767 100644 (file)
@@ -240,7 +240,8 @@ retry:
                 */
                if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
                        /* good news: we believe it's a lockup */
-                       WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+                       WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+                            fence->seq, seq);
                        /* FIXME: what should we do? Marking everyone
                         * as signaled for now
                         */
index 6a13ee38a5b9fc71201a0a9a38dd93b3ab5957eb..0cfbba02c4d03773df29eb07efe74010a989002b 100644 (file)
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
        };
 
        /* on hw with routers, select right port */
-       if (radeon_connector->router.valid)
-               radeon_router_select_port(radeon_connector);
+       if (radeon_connector->router.ddc_valid)
+               radeon_router_select_ddc_port(radeon_connector);
 
        ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
        if (ret == 2)
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
                          addr, val);
 }
 
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
 {
        u8 val;
 
-       if (!radeon_connector->router.valid)
+       if (!radeon_connector->router.ddc_valid)
                return;
 
        radeon_i2c_get_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x3, &val);
-       val &= radeon_connector->router.mux_control_pin;
+       val &= ~radeon_connector->router.ddc_mux_control_pin;
        radeon_i2c_put_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x3, val);
        radeon_i2c_get_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x1, &val);
-       val &= radeon_connector->router.mux_control_pin;
-       val |= radeon_connector->router.mux_state;
+       val &= ~radeon_connector->router.ddc_mux_control_pin;
+       val |= radeon_connector->router.ddc_mux_state;
+       radeon_i2c_put_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+       u8 val;
+
+       if (!radeon_connector->router.cd_valid)
+               return;
+
+       radeon_i2c_get_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x3, &val);
+       val &= ~radeon_connector->router.cd_mux_control_pin;
+       radeon_i2c_put_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x3, val);
+       radeon_i2c_get_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x1, &val);
+       val &= ~radeon_connector->router.cd_mux_control_pin;
+       val |= radeon_connector->router.cd_mux_state;
        radeon_i2c_put_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x1, val);
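The functional fix in this hunk is the inverted mask: the old code did val &= mux_control_pin, which keeps only the mux-control bits and wipes every other bit in the router register, while the new val &= ~...mux_control_pin clears just the field being driven before ORing in the new mux state. That is the usual clear-then-set read-modify-write on a shared register; a freestanding sketch (register layout and values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Update only the bits covered by 'mask' in an 8-bit register image. */
    static uint8_t rmw(uint8_t reg, uint8_t mask, uint8_t state)
    {
        reg &= ~mask;            /* clear the field we own... */
        reg |= state & mask;     /* ...then set the new value */
        return reg;
    }

    int main(void)
    {
        uint8_t reg = 0xF5;      /* unrelated bits 0xF0 must survive */
        reg = rmw(reg, 0x0F, 0x0A);
        printf("0x%02X\n", reg); /* prints 0xFA */
        return 0;
    }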
index 92457163d07094dba9bd5975dc63dfb4968e92a6..680f57644e865ce2a5e4fdc97b5154a7dffb11a5 100644 (file)
@@ -401,13 +401,19 @@ struct radeon_hpd {
 };
 
 struct radeon_router {
-       bool valid;
        u32 router_id;
        struct radeon_i2c_bus_rec i2c_info;
        u8 i2c_addr;
-       u8 mux_type;
-       u8 mux_control_pin;
-       u8 mux_state;
+       /* i2c mux */
+       bool ddc_valid;
+       u8 ddc_mux_type;
+       u8 ddc_mux_control_pin;
+       u8 ddc_mux_state;
+       /* clock/data mux */
+       bool cd_valid;
+       u8 cd_mux_type;
+       u8 cd_mux_control_pin;
+       u8 cd_mux_state;
 };
 
 struct radeon_connector {
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
                                u8 slave_addr,
                                u8 addr,
                                u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
index d7ab914164103bdda7bf9ea72e8e35a2c78a5f37..8eb183466015326f7ca93b2647e85a610fe91f6f 100644 (file)
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;
+
+retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
        bo->gobj = gobj;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
-
-retry:
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
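Moving the retry: label above the kzalloc() matters because when placement fails TTM has already destroyed the radeon_bo (see the ttm_bo_init error-path fix later in this series), so the fallback-domain retry must start from a fresh allocation rather than touch freed memory. The shape of the pattern, reduced to standard C (domain values and names invented):

    #include <stdlib.h>
    #include <stdio.h>

    struct obj { int domain; };

    /* Stand-in for the placement attempt: like TTM here, it frees the
     * object itself when it fails, so the caller must not reuse it. */
    static int try_place(struct obj *o)
    {
        if (o->domain == 1) {       /* pretend VRAM (1) is full */
            free(o);
            return -1;
        }
        return 0;
    }

    static struct obj *create_with_fallback(int domain)
    {
        struct obj *o;
    retry:
        o = calloc(1, sizeof(*o));  /* allocate on every pass */
        if (!o)
            return NULL;
        o->domain = domain;
        if (try_place(o) != 0) {    /* 'o' is already freed here */
            if (domain == 1) {
                domain = 0;         /* fall back to GTT (0) */
                goto retry;         /* fresh allocation, no reuse */
            }
            return NULL;
        }
        return o;
    }

    int main(void)
    {
        struct obj *o = create_with_fallback(1);
        printf("placed in domain %d\n", o ? o->domain : -1);
        free(o);
        return 0;
    }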
index fe95bb35317ea26c2cabe33fc0bae285bfc32cef..01c2c736a1daff116284db0f84f3896977e6eecb 100644 (file)
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->offset = bo_mem->start << PAGE_SHIFT;
        if (!gtt->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+                    gtt->num_pages, bo_mem, backend);
        }
        r = radeon_gart_bind(gtt->rdev, gtt->offset,
                             gtt->num_pages, gtt->pages);
index f683e51a2a0674518b476d6a2157b39a303170a6..5512e4e5e636ebb91ea73e99d63a4e60bb95bbe6 100644 (file)
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.ram.ptr) {
-               WARN(1, "RS400 GART already initialized.\n");
+               WARN(1, "RS400 GART already initialized\n");
                return 0;
        }
        /* Check gart size */
index b091a1f6fa4ed2b86dc1975717abbf017667c251..f1c6e02c2e6b41f18338688b1dc9d49b33dbe8bb 100644 (file)
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.vram.robj) {
-               WARN(1, "RS600 GART already initialized.\n");
+               WARN(1, "RS600 GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
                ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
index a1cb783c7131c56de8aa19cf5afb6a668e5e4345..3ca77dc03915017e78d0e7f7e8e479d9f279fc56 100644 (file)
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <asm/atomic.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        ttm_bo_mem_put(bo, &bo->mem);
 
        atomic_set(&bo->reserved, 0);
+
+       /*
+        * Make processes trying to reserve actually see the cleared flag.
+        */
+       smp_mb__after_atomic_dec();
        wake_up_all(&bo->event_queue);
 }
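The new comment is terse; the underlying point is that atomic_set() carries no memory barrier of its own, so a waiter on another CPU could be woken yet still read the stale reserved value. smp_mb__after_atomic_dec() forces the cleared flag out before wake_up_all() runs. A rough C11 rendering of the intended ordering (a sketch of the idea, not of the kernel primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int reserved = 1;      /* stands in for bo->reserved */

    static void release_object(void)
    {
        /* Release ordering publishes the cleared flag before any
         * later wake-up is observed; this is the role the added
         * smp_mb__after_atomic_dec() plays in the patch. */
        atomic_store_explicit(&reserved, 0, memory_order_release);
        /* wake_up_all(&bo->event_queue) would follow here */
    }

    static int try_reserve(void)
    {
        int expected = 0;
        /* Acquire pairs with the release store above. */
        return atomic_compare_exchange_strong_explicit(
                &reserved, &expected, 1,
                memory_order_acquire, memory_order_relaxed);
    }

    int main(void)
    {
        release_object();
        printf("reserve succeeded: %d\n", try_reserve());
        return 0;
    }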
 
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver;
-       void *sync_obj;
+       void *sync_obj = NULL;
        void *sync_obj_arg;
        int put_count;
        int ret;
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                spin_lock(&glob->lru_lock);
        }
 queue:
-       sync_obj = bo->sync_obj;
-       sync_obj_arg = bo->sync_obj_arg;
        driver = bdev->driver;
+       if (bo->sync_obj)
+               sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       sync_obj_arg = bo->sync_obj_arg;
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bo->lock);
 
-       if (sync_obj)
+       if (sync_obj) {
                driver->sync_obj_flush(sync_obj, sync_obj_arg);
+               driver->sync_obj_unref(&sync_obj);
+       }
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;
 
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
                if (mem->mm_node)
                        break;
-               spin_lock(&glob->lru_lock);
-               if (list_empty(&man->lru)) {
-                       spin_unlock(&glob->lru_lock);
-                       break;
-               }
-               spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                                no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
 {
-       int i;
+       BUG_ON((placement->fpfn || placement->lpfn) &&
+              (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
-       if (placement->fpfn || placement->lpfn) {
-               if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
-                       printk(KERN_ERR TTM_PFX "Page number range to small "
-                               "Need %lu pages, range is [%u, %u]\n",
-                               bo->mem.num_pages, placement->fpfn,
-                               placement->lpfn);
-                       return -EINVAL;
-               }
-       }
-       for (i = 0; i < placement->num_placement; i++) {
-               if (!capable(CAP_SYS_ADMIN)) {
-                       if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
-                               printk(KERN_ERR TTM_PFX "Need to be root to "
-                                       "modify NO_EVICT status.\n");
-                               return -EINVAL;
-                       }
-               }
-       }
-       for (i = 0; i < placement->num_busy_placement; i++) {
-               if (!capable(CAP_SYS_ADMIN)) {
-                       if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
-                               printk(KERN_ERR TTM_PFX "Need to be root to "
-                                       "modify NO_EVICT status.\n");
-                               return -EINVAL;
-                       }
-               }
-       }
        return 0;
 }
 
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+               if (destroy)
+                       (*destroy)(bo);
+               else
+                       kfree(bo);
                return -EINVAL;
        }
        bo->destroy = destroy;
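Before this hunk, ttm_bo_init() returned -EINVAL for a zero-sized object while leaking the buffer object it had been handed; the fix releases it on that path, through the caller's destroy callback when one exists and kfree() otherwise. A sketch of that ownership convention (hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct bo { void (*destroy)(struct bo *); };

    /* The init function owns 'bo' on every path, including failure. */
    static int bo_init(struct bo *bo, size_t size,
                       void (*destroy)(struct bo *))
    {
        bo->destroy = destroy;
        if (size == 0) {             /* validation failure */
            if (destroy)
                destroy(bo);         /* caller-provided release */
            else
                free(bo);            /* default release */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct bo *bo = calloc(1, sizeof(*bo));
        if (!bo)
            return 1;
        if (bo_init(bo, 0, NULL) != 0)
            puts("init failed; object already released");
        return 0;
    }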
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;
 
-       if (type >= TTM_NUM_MEM_TYPES) {
-               printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
-               return ret;
-       }
-
+       BUG_ON(type >= TTM_NUM_MEM_TYPES);
        man = &bdev->man[type];
-       if (man->has_type) {
-               printk(KERN_ERR TTM_PFX
-                      "Memory manager already initialized for type %d\n",
-                      type);
-               return ret;
-       }
+       BUG_ON(man->has_type);
 
        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 
        ret = 0;
        if (type != TTM_PL_SYSTEM) {
-               if (!p_size) {
-                       printk(KERN_ERR TTM_PFX
-                              "Zero size memory manager type %d\n",
-                              type);
-                       return ret;
-               }
-
                ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
index 7410c190c8911a9342ba57ea616d77a722061312..038e947d00f986ef987ef914fd8432f2202c25f3 100644 (file)
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+       struct drm_mm mm;
+       spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                               struct ttm_buffer_object *bo,
                               struct ttm_placement *placement,
                               struct ttm_mem_reg *mem)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
-       struct drm_mm *mm = man->priv;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+       struct drm_mm *mm = &rman->mm;
        struct drm_mm_node *node = NULL;
        unsigned long lpfn;
        int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                if (unlikely(ret))
                        return ret;
 
-               spin_lock(&glob->lru_lock);
+               spin_lock(&rman->lock);
                node = drm_mm_search_free_in_range(mm,
                                        mem->num_pages, mem->page_alignment,
                                        placement->fpfn, lpfn, 1);
                if (unlikely(node == NULL)) {
-                       spin_unlock(&glob->lru_lock);
+                       spin_unlock(&rman->lock);
                        return 0;
                }
                node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-                                                       mem->page_alignment,
-                                                       placement->fpfn,
-                                                       lpfn);
-               spin_unlock(&glob->lru_lock);
+                                                    mem->page_alignment,
+                                                    placement->fpfn,
+                                                    lpfn);
+               spin_unlock(&rman->lock);
        } while (node == NULL);
 
        mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
                                struct ttm_mem_reg *mem)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
        if (mem->mm_node) {
-               spin_lock(&glob->lru_lock);
+               spin_lock(&rman->lock);
                drm_mm_put_block(mem->mm_node);
-               spin_unlock(&glob->lru_lock);
+               spin_unlock(&rman->lock);
                mem->mm_node = NULL;
        }
 }
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
                           unsigned long p_size)
 {
-       struct drm_mm *mm;
+       struct ttm_range_manager *rman;
        int ret;
 
-       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-       if (!mm)
+       rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+       if (!rman)
                return -ENOMEM;
 
-       ret = drm_mm_init(mm, 0, p_size);
+       ret = drm_mm_init(&rman->mm, 0, p_size);
        if (ret) {
-               kfree(mm);
+               kfree(rman);
                return ret;
        }
 
-       man->priv = mm;
+       spin_lock_init(&rman->lock);
+       man->priv = rman;
        return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
-       struct drm_mm *mm = man->priv;
-       int ret = 0;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+       struct drm_mm *mm = &rman->mm;
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&rman->lock);
        if (drm_mm_clean(mm)) {
                drm_mm_takedown(mm);
-               kfree(mm);
+               spin_unlock(&rman->lock);
+               kfree(rman);
                man->priv = NULL;
-       } else
-               ret = -EBUSY;
-       spin_unlock(&glob->lru_lock);
-       return ret;
+               return 0;
+       }
+       spin_unlock(&rman->lock);
+       return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
                             const char *prefix)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
-       struct drm_mm *mm = man->priv;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-       spin_lock(&glob->lru_lock);
-       drm_mm_debug_table(mm, prefix);
-       spin_unlock(&glob->lru_lock);
+       spin_lock(&rman->lock);
+       drm_mm_debug_table(&rman->mm, prefix);
+       spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
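The net effect of this file's changes: the range manager's drm_mm state and a dedicated spinlock now travel together in ttm_range_manager, held privately behind man->priv, instead of piggybacking on the global LRU lock; the new comment already hedges that a mutex could serve better under heavy fragmentation. Reduced to its essentials (illustrative userspace stand-ins):

    #include <pthread.h>
    #include <stdlib.h>

    struct allocator { int placeholder; };  /* stands in for struct drm_mm */

    /* Bundle the allocator with its own lock instead of borrowing an
     * unrelated subsystem-wide lock, as the patch does here. */
    struct range_manager {
        struct allocator mm;
        pthread_mutex_t  lock;
    };

    static struct range_manager *range_manager_create(void)
    {
        struct range_manager *rman = calloc(1, sizeof(*rman));
        if (!rman)
            return NULL;
        pthread_mutex_init(&rman->lock, NULL);
        return rman;
    }

    static void range_manager_alloc(struct range_manager *rman)
    {
        pthread_mutex_lock(&rman->lock);   /* short, private critical section */
        /* drm_mm_search_free_in_range() / drm_mm_get_block_atomic_range()
         * run under this lock in the real code */
        pthread_mutex_unlock(&rman->lock);
    }

    int main(void)
    {
        struct range_manager *rman = range_manager_create();
        if (rman) {
            range_manager_alloc(rman);
            pthread_mutex_destroy(&rman->lock);
            free(rman);
        }
        return 0;
    }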
index a7bab87a548bd25de465d5fff45436a177346e33..af789dc869b94e997621ff615c184383c857377c 100644 (file)
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
                return ret;
 
        ret = be->func->bind(be, bo_mem);
-       if (ret) {
-               printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+       if (unlikely(ret != 0))
                return ret;
-       }
 
        ttm->state = tt_bound;
 
index 9b5b4d9dd62c9fceffe9c5fa0dd218ff7119d5a7..3e038a394c510efd6194618a7f3a1f185380f7fc 100644 (file)
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
        vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
                first_pfn + 1;
 
-       if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+       vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+       if (NULL == vsg->pages)
                return -ENOMEM;
-       memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm,
                             (unsigned long)xfer->mem_addr,
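The via_dmablit change swaps an open-coded vmalloc()+memset() pair for vzalloc(), which allocates and zeroes in one step. The userspace analogue is calloc() versus malloc()+memset(); a trivial demonstration:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    #define NUM 16

    int main(void)
    {
        /* old style: allocate, then zero by hand */
        void **a = malloc(sizeof(void *) * NUM);
        if (a)
            memset(a, 0, sizeof(void *) * NUM);

        /* new style: one call that returns zeroed memory */
        void **b = calloc(NUM, sizeof(void *));

        printf("%p %p\n", (void *)a, (void *)b);
        free(a);
        free(b);
        return 0;
    }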
index 51d9f9f1d7f2aa1b2ea04f309569d71bd9244600..76954e3528c1d6ccb763973c22b7f7979ad4f1b4 100644 (file)
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 
        fence_rep.error = ret;
        fence_rep.fence_seq = (uint64_t) sequence;
+       fence_rep.pad64 = 0;
 
        user_fence_rep = (struct drm_vmw_fence_rep __user *)
            (unsigned long)arg->fence_rep;
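fence_rep lives on the kernel stack and is copied out to user space below; an explicit pad64 member that is never assigned would carry whatever bytes the stack held, a classic kernel-to-user information leak, so the fix stores 0 into it. The same hazard in plain C (field layout assumed for illustration; the real struct is drm_vmw_fence_rep):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct fence_rep {
        int32_t  error;      /* 4 bytes... */
        uint32_t pad64;      /* ...explicit pad keeps the next field aligned */
        uint64_t fence_seq;
    };

    int main(void)
    {
        struct fence_rep rep;

        /* Field-by-field assignment leaves unassigned members as stack
         * garbage, so the fix clears the pad before copying out: */
        rep.error = 0;
        rep.fence_seq = 42;
        rep.pad64 = 0;

        /* Safer still is zeroing the whole struct up front: */
        struct fence_rep rep2;
        memset(&rep2, 0, sizeof(rep2));
        rep2.error = 0;
        rep2.fence_seq = 42;

        printf("%llu %llu\n",
               (unsigned long long)rep.fence_seq,
               (unsigned long long)rep2.fence_seq);
        return 0;
    }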
index 87c6e6156d7de8bc809a62bab06dbf64a7da512b..cceeb42789b655e82e5431d9810b9fa033f883fd 100644 (file)
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
                               &vmw_vram_ne_placement,
                               false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);
+       if (unlikely(ret != 0))
+               vfbs->buffer = NULL;
 
        return ret;
 }
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(&vfb->base);
 
+       if (unlikely(vfbs->buffer == NULL))
+               return 0;
+
        bo = &vfbs->buffer->base;
        ttm_bo_unref(&bo);
        vfbs->buffer = NULL;
index a01c47ddb5bc724617b74c15ad3bffc2ed63067f..29113c9b26a8c01cc217cc82906e98a83a8fa36c 100644 (file)
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
                return -EINVAL;
        }
 
-       dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+       dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
 
        if (!dev_priv->ldu_priv)
                return -ENOMEM;
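The ldu_priv allocation passed its arguments backwards, kmalloc(GFP_KERNEL, sizeof(...)); since both parameters are plain integer types the compiler accepts it, and the call quietly allocates GFP_KERNEL (a small flag value) bytes with sizeof(...) misread as flags. A userspace analogue shows why nothing warns (names and values invented):

    #include <stdio.h>
    #include <stdlib.h>

    #define MY_GFP_KERNEL 0xD0u   /* illustrative flag value */

    /* Same shape as kmalloc: size first, flags second. */
    static void *my_alloc(size_t size, unsigned flags)
    {
        (void)flags;
        printf("allocating %zu bytes\n", size);
        return malloc(size);
    }

    struct ldu_priv { char payload[4096]; };

    int main(void)
    {
        /* Swapped: compiles cleanly, allocates only 0xD0 bytes, and any
         * use of the full struct overruns the buffer. */
        void *bad  = my_alloc(MY_GFP_KERNEL, sizeof(struct ldu_priv));

        /* Correct order. */
        void *good = my_alloc(sizeof(struct ldu_priv), MY_GFP_KERNEL);

        free(bad);
        free(good);
        return 0;
    }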
index df2036ed18d5f2c920ba61f99b716998a6f25329..f1a52f9e72988da041c