Merge branch 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
author		Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 11 Sep 2010 22:50:53 +0000 (15:50 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 11 Sep 2010 22:50:53 +0000 (15:50 -0700)
* 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  PM / Hibernate: Avoid hitting OOM during preallocation of memory
  PM QoS: Correct pr_debug() misuse and improve parameter checks
  PM: Prevent waiting forever on asynchronous resume after failing suspend

267 files changed:
Documentation/DocBook/kernel-locking.tmpl
Documentation/block/cfq-iosched.txt [new file with mode: 0644]
Documentation/cgroups/blkio-controller.txt
Documentation/gpio.txt
Documentation/mutex-design.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/it8152.c
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/clock.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/clock-imx35.c
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-mx5/clock-mx51.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/include/mach/mfp-pxa300.h
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/pm_runtime.c [new file with mode: 0644]
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
arch/arm/plat-mxc/tzic.c
arch/arm/plat-pxa/pwm.c
arch/arm/tools/mach-types
arch/powerpc/include/asm/fsldma.h
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/windows.c
arch/x86/include/asm/iomap.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/tsc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/i8259.c
arch/x86/kvm/irq.h
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
block/blk-cgroup.c
block/blk-core.c
block/blk-sysfs.c
block/blk.h
block/cfq-iosched.c
block/elevator.c
drivers/ata/ahci.c
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-sff.c
drivers/ata/pata_artop.c
drivers/ata/pata_via.c
drivers/ata/sata_mv.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/char/vt_ioctl.c
drivers/gpio/sx150x.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/hwmon/hp_accel.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/input.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_wac.c
drivers/mmc/core/sdio.c
drivers/mmc/host/at91_mci.c
drivers/mmc/host/imxmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/net/3c59x.c
drivers/net/b44.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bonding/bond_main.c
drivers/net/ks8851.c
drivers/net/niu.c
drivers/net/stmmac/stmmac_main.c
drivers/net/usb/ipheth.c
drivers/net/via-velocity.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/s390/char/tape_block.c
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/constants.c
drivers/scsi/hpsa.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/vhost/vhost.c
drivers/video/pxa168fb.c
fs/binfmt_misc.c
fs/bio-integrity.c
fs/direct-io.c
fs/exec.c
fs/fcntl.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/minix/namei.c
fs/nfsd/nfs4state.c
fs/ocfs2/alloc.c
fs/ocfs2/blockcheck.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/proc/page.c
fs/proc/task_mmu.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_ioctl.c
include/asm-generic/gpio.h
include/linux/cgroup.h
include/linux/elevator.h
include/linux/i2c/sx150x.h
include/linux/io-mapping.h
include/linux/kfifo.h
include/linux/ksm.h
include/linux/lglock.h
include/linux/libata.h
include/linux/mm.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/mutex.h
include/linux/semaphore.h
include/linux/swap.h
include/linux/vmstat.h
include/net/cls_cgroup.h
include/net/ip_vs.h
include/net/sock.h
include/net/udp.h
kernel/cgroup.c
kernel/debug/kdb/kdb_bp.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/mutex.c
kernel/perf_event.c
kernel/power/hibernate.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/sched.c
kernel/sched_fair.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_kprobe.c
kernel/watchdog.c
lib/scatterlist.c
mm/Kconfig
mm/backing-dev.c
mm/bounce.c
mm/compaction.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmzone.c
mm/page_alloc.c
mm/swapfile.c
mm/vmstat.c
net/core/dev.c
net/core/skbuff.c
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/datagram.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c
net/ipv6/udp.c
net/irda/irlan/irlan_common.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/sctp/sm_statefuns.c
net/unix/af_unix.c
security/apparmor/include/resource.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/path.c
security/apparmor/policy.c
security/apparmor/resource.c
security/integrity/ima/ima.h
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
security/keys/keyctl.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/isa/msnd/msnd_pinnacle.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen.h
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/virtuoso.c
sound/pci/oxygen/xonar_wm87x6.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/pcm.c
tools/perf/util/callchain.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
virt/kvm/kvm_main.c

index 0b1a3f97f285361a4075c8e267d42b2053747d9a..a0d479d1e1dd872bd1ae7b4d17d4582df03a384c 100644 (file)
@@ -1961,6 +1961,12 @@ machines due to caching.
    </sect1>
   </chapter>
 
+  <chapter id="apiref">
+   <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+  </chapter>
+
   <chapter id="references">
    <title>Further reading</title>
 
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644 (file)
index 0000000..e578fee
--- /dev/null
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for the next request on certain cfq
+queues (for sequential workloads) and service trees (for random workloads)
+before the queue is expired and CFQ selects the next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single-spindle SATA/SAS disks, where we can cut down on the overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all idling at the queue/service tree
+level, and one should see overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in a hardware RAID configuration. The
+downside is that the isolation provided from WRITES also goes down and the
+notion of IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general, for SATA/SAS disks and software RAID of SATA/SAS disks, keeping
+slice_idle enabled should be useful. For any configuration where there are
+multiple spindles behind a single LUN (host-based hardware RAID controller or
+storage arrays), setting slice_idle=0 might result in better throughput and
+acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+The basic CFQ design is to provide priority-based time slices. A higher
+priority process gets a bigger time slice and a lower priority process gets a
+smaller time slice. Measuring time becomes harder if storage is fast and
+supports NCQ, where it is better to dispatch multiple requests from multiple
+cfq queues in the request queue at a time. In such a scenario, it is not
+possible to accurately measure the time consumed by a single queue.
+
+What is possible, though, is to measure the number of requests dispatched
+from a single queue and also to allow dispatch from multiple cfq queues at
+the same time. This effectively becomes fairness in terms of IOPS (IO
+operations per second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
index 48e0b21b00594dac1971472bef9105e04d2bfa77..6919d62591d97580d3132f539e0edd50d9985449 100644 (file)
@@ -217,6 +217,7 @@ Details of cgroup files
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
 
 If group_isolation=1, it provides stronger isolation between groups at the
 expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
 and one wants stronger isolation between groups, then set group_isolation=1
 but this will come at cost of reduced throughput.
 
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On faster hardware CFQ can be slow, especially with sequential workloads.
+This happens because CFQ idles on a single queue, and a single queue might not
+drive deep enough request queue depths to keep the storage busy. In such
+scenarios one can try setting slice_idle=0, which switches CFQ to IOPS
+(IO operations per second) mode on NCQ-supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence will
+be able to drive a higher queue depth and achieve better throughput. That also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is the same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+One can experience an overall throughput drop if one has created multiple
+groups and put applications in those groups which are not driving enough
+IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups and throughput should improve.
+
 What works
 ==========
 - Currently only sync IO queues are supported. All the buffered writes are
index d96a6dba57489bc6bbf3e747d82cd450084e5609..9633da01ff46afb008566ccb53aa381606654e85 100644 (file)
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid.  To
-test if a number could reference a GPIO, you may use this predicate:
+test whether such a number from such a structure could reference a GPIO,
+you may use this predicate:
 
        int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below).  Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are the questions of whether that support can leave
+"holes" in the space of GPIO numbers, and whether new controllers can be added
+at runtime.  Such issues can affect things including whether adjacent GPIO
+numbers are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space.  (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.)
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
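To make the predicate above concrete, here is a minimal kernel-style sketch of the pattern it enables; struct foo_platform_data and its reset_gpio field are hypothetical, standing in for whatever the board code hands the driver:

    #include <linux/gpio.h>

    /* Hypothetical board data: reset_gpio may be set to -EINVAL when unused. */
    struct foo_platform_data {
            int reset_gpio;
    };

    static int foo_setup_reset(struct foo_platform_data *pdata)
    {
            /* Only request the line if the number could be a GPIO at all. */
            if (!gpio_is_valid(pdata->reset_gpio))
                    return 0;       /* reset line not wired on this board */

            /*
             * The request may still fail, e.g. if the number is valid but
             * unused on this particular board.
             */
            return gpio_request(pdata->reset_gpio, "foo-reset");
    }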
index c91ccc0720fa97f42a1a616fd83e23628ae85ab1..38c10fd7f4110448facd7089b985c4776d264d85 100644 (file)
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int  mutex_lock_interruptible_nested(struct mutex *lock,
                                       unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
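As a usage illustration for the entry just added to the list, a short sketch of the drop-to-zero pattern that atomic_dec_and_mutex_lock() serves; struct foo and foo_list_mutex are hypothetical names:

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <asm/atomic.h>

    /* Hypothetical refcounted object protected by a global list mutex. */
    struct foo {
            atomic_t refcount;
    };

    static DEFINE_MUTEX(foo_list_mutex);

    static void foo_put(struct foo *f)
    {
            /*
             * Returns nonzero, with foo_list_mutex held, only when the count
             * dropped to zero, so teardown happens under the lock.
             */
            if (atomic_dec_and_mutex_lock(&f->refcount, &foo_list_mutex)) {
                    /* remove from list, etc. */
                    mutex_unlock(&foo_list_mutex);
                    kfree(f);
            }
    }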
index ce46fa1e643e876344071d480e1e6d6d54b89856..37c6aad5e590ac1375944b8edf0656aa7b106607 100644 (file)
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop       Basic Laptop config (default)
+  hp-laptop    HP laptops, e.g. G60
   dell-laptop  Dell laptops
   dell-vostro  Dell Vostro
   olpc-xo-1_5  OLPC XO 1.5
index 087912aa09bda419450654f904c96289dcb1b43c..e7c528ff1013048cd4c6dadc0dbfc23f11204685 100644 (file)
@@ -1445,6 +1445,16 @@ S:       Maintained
 F:     Documentation/video4linux/cafe_ccic
 F:     drivers/media/video/cafe_ccic*
 
+CAIF NETWORK LAYER
+M:     Sjur Braendeland <sjur.brandeland@stericsson.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     Documentation/networking/caif/
+F:     drivers/net/caif/
+F:     include/linux/caif/
+F:     include/net/caif/
+F:     net/caif/
+
 CALGARY x86-64 IOMMU
 M:     Muli Ben-Yehuda <muli@il.ibm.com>
 M:     "Jon D. Mason" <jdmason@kudzu.us>
@@ -2787,11 +2797,6 @@ S:       Maintained
 F:     arch/x86/kernel/hpet.c
 F:     arch/x86/include/asm/hpet.h
 
-HPET:  ACPI
-M:     Bob Picco <bob.picco@hp.com>
-S:     Maintained
-F:     drivers/char/hpet.c
-
 HPFS FILESYSTEM
 M:     Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
 W:     http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3404,7 +3409,7 @@ F:        drivers/s390/kvm/
 
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
-W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W:     http://kernel.org/pub/linux/utils/kernel/kexec/
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -4810,6 +4815,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4834,6 +4840,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -4841,12 +4848,10 @@ F:      kernel/rcu*
 F:     kernel/srcu*
 X:     kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
 M:     Paul Gortmaker <p_gortmaker@yahoo.com>
 S:     Maintained
-F:     Documentation/rtc.txt
-F:     drivers/rtc/
-F:     include/linux/rtc.h
+F:     drivers/char/rtc.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
index 16bc8eb4901c9335b32a774082e9bc1cc77bb8b9..553b7cf17bfb0bac057eaedf0402a0bca2aa47e8 100644 (file)
@@ -1576,97 +1576,6 @@ config AUTO_ZRELADDR
          0xf8000000. This assumes the zImage being placed in the first 128MB
          from start of memory.
 
-config ZRELADDR
-       hex "Physical address of the decompressed kernel image"
-       depends on !AUTO_ZRELADDR
-       default 0x00008000 if ARCH_BCMRING ||\
-               ARCH_CNS3XXX ||\
-               ARCH_DOVE ||\
-               ARCH_EBSA110 ||\
-               ARCH_FOOTBRIDGE ||\
-               ARCH_INTEGRATOR ||\
-               ARCH_IOP13XX ||\
-               ARCH_IOP33X ||\
-               ARCH_IXP2000 ||\
-               ARCH_IXP23XX ||\
-               ARCH_IXP4XX ||\
-               ARCH_KIRKWOOD ||\
-               ARCH_KS8695 ||\
-               ARCH_LOKI ||\
-               ARCH_MMP ||\
-               ARCH_MV78XX0 ||\
-               ARCH_NOMADIK ||\
-               ARCH_NUC93X ||\
-               ARCH_NS9XXX ||\
-               ARCH_ORION5X ||\
-               ARCH_SPEAR3XX ||\
-               ARCH_SPEAR6XX ||\
-               ARCH_TEGRA ||\
-               ARCH_U8500 ||\
-               ARCH_VERSATILE ||\
-               ARCH_W90X900
-       default 0x08008000 if ARCH_MX1 ||\
-               ARCH_SHARK
-       default 0x10008000 if ARCH_MSM ||\
-               ARCH_OMAP1 ||\
-               ARCH_RPC
-       default 0x20008000 if ARCH_S5P6440 ||\
-               ARCH_S5P6442 ||\
-               ARCH_S5PC100 ||\
-               ARCH_S5PV210
-       default 0x30008000 if ARCH_S3C2410 ||\
-               ARCH_S3C2400 ||\
-               ARCH_S3C2412 ||\
-               ARCH_S3C2416 ||\
-               ARCH_S3C2440 ||\
-               ARCH_S3C2443
-       default 0x40008000 if ARCH_STMP378X ||\
-               ARCH_STMP37XX ||\
-               ARCH_SH7372 ||\
-               ARCH_SH7377 ||\
-               ARCH_S5PV310
-       default 0x50008000 if ARCH_S3C64XX ||\
-               ARCH_SH7367
-       default 0x60008000 if ARCH_VEXPRESS
-       default 0x80008000 if ARCH_MX25 ||\
-               ARCH_MX3 ||\
-               ARCH_NETX ||\
-               ARCH_OMAP2PLUS ||\
-               ARCH_PNX4008
-       default 0x90008000 if ARCH_MX5 ||\
-               ARCH_MX91231
-       default 0xa0008000 if ARCH_IOP32X ||\
-               ARCH_PXA ||\
-               MACH_MX27
-       default 0xc0008000 if ARCH_LH7A40X ||\
-               MACH_MX21
-       default 0xf0008000 if ARCH_AAEC2000 ||\
-               ARCH_L7200
-       default 0xc0028000 if ARCH_CLPS711X
-       default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
-       default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
-       default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
-       default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
-       default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
-       default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
-       default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-       default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
-       default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
-       default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
-       default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
-       default 0xc0208000 if ARCH_SA1100 && SA1111
-       default 0xc0008000 if ARCH_SA1100 && !SA1111
-       default 0x30108000 if ARCH_S3C2410 && PM_H1940
-       default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
-       default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
-       help
-         ZRELADDR is the physical address where the decompressed kernel
-         image will be placed. ZRELADDR has to be specified when the
-         assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
-         selected.
-
 endmenu
 
 menu "CPU Power Management"
index f705213caa881af9c07e181c0d2a3a1a26a5d98a..4a590f4113e2af044ea1764aeb0681ad7f50a74c 100644 (file)
 MKIMAGE         := $(srctree)/scripts/mkuboot.sh
 
 ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
 endif
 
 # Note: the following conditions must always be true:
+#   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
 #   PARAMS_PHYS must be within 4MB of ZRELADDR
 #   INITRD_PHYS must be in RAM
+ZRELADDR    := $(zreladdr-y)
 PARAMS_PHYS := $(params_phys-y)
 INITRD_PHYS := $(initrd_phys-y)
 
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE  $@
 ifeq ($(CONFIG_ZBOOT_ROM),y)
 $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
 else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
index 68775e33476c2fafb4c20d88f7f676c836a8edc1..b23f6bc46cfa1dd1029bb53dc7009c3c790f1088 100644 (file)
@@ -79,6 +79,10 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
index 6af9907c3b5ccad2ae2d73e37f5470c5b2f6b897..6825c34646d4e02f24b0eefe7ab4e012bca05208 100644 (file)
@@ -177,7 +177,7 @@ not_angel:
                and     r4, pc, #0xf8000000
                add     r4, r4, #TEXT_OFFSET
 #else
-               ldr     r4, =CONFIG_ZRELADDR
+               ldr     r4, =zreladdr
 #endif
                subs    r0, r0, r1              @ calculate the delta offset
 
index 6c091356245593b87860d2ccb6221650fc62855b..7974baacafcea74ec055a46ee0f6cea496f24e6f 100644 (file)
@@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dev->bus == &pci_bus_type) &&
+               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
index c226fe10553e2952ec982ef3ec5fcaf8538586e0..c568da7dcae45e60e8630e2b3060599f561d6555 100644 (file)
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-                                  size_t size)
-{
-       return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
index 48837e6d888722dc96f594247a80026bf9b75e29..b5799a3b7117d5480eec6456008f79e01d1064ae 100644 (file)
@@ -17,7 +17,7 @@
  * counter interrupts are regular interrupts and not an NMI. This
  * means that when we receive the interrupt we can call
  * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
  */
 static inline void
 set_perf_event_pending(void)
index d02cfb683487eeafea4ef407a1a4e6f2d4ce4112..c891eb76c0e313406847e7b9fbe968bb1b8fa459 100644 (file)
 #define __NR_perf_event_open           (__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg                  (__NR_SYSCALL_BASE+365)
 #define __NR_accept4                   (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.
index afeb71fa72cb81fc0e2fb5652c653ef34e7258bb..5c26eccef9982665b1e1672416b9bc996f3b2dae 100644 (file)
                CALL(sys_perf_event_open)
 /* 365 */      CALL(sys_recvmmsg)
                CALL(sys_accept4)
+               CALL(sys_fanotify_init)
+               CALL(sys_fanotify_mark)
+               CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 417c392ddf1cb55066fa5f99e83e77514bd89901..ecbb0288e5dd95c80b420635dffee6ef600f86a8 100644 (file)
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
index 753c0d31a3d3f0b407cfe20b68f5ba66189cd1a7..c67b47f1c0fd805751cf226a65315337a2507942 100644 (file)
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
        .pmc_mask       = 1 << AT91SAM9G45_ID_SSC1,
        .type           = CLK_TYPE_PERIPHERAL,
 };
-static struct clk tcb_clk = {
-       .name           = "tcb_clk",
+static struct clk tcb0_clk = {
+       .name           = "tcb0_clk",
        .pmc_mask       = 1 << AT91SAM9G45_ID_TCB,
        .type           = CLK_TYPE_PERIPHERAL,
 };
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
        .parent         = &uhphs_clk,
 };
 
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+       .name           = "tcb1_clk",
+       .pmc_mask       = 0,
+       .type           = CLK_TYPE_PERIPHERAL,
+       .parent         = &tcb0_clk,
+};
+
 static struct clk *periph_clocks[] __initdata = {
        &pioA_clk,
        &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
        &spi1_clk,
        &ssc0_clk,
        &ssc1_clk,
-       &tcb_clk,
+       &tcb0_clk,
        &pwm_clk,
        &tsc_clk,
        &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
        &mmc1_clk,
        // irq0
        &ohci_clk,
+       &tcb1_clk,
 };
 
 /*
index 809114d5a5a6690ec3c4f510bb6cb88fc7fede99..5e71ccd5e7d381ca211d1f73653351bdca814c6b 100644 (file)
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
                .end    = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
                .flags  = IORESOURCE_MEM,
        },
-       [2] = {
+       [1] = {
                .start  = AT91SAM9G45_ID_DMA,
                .end    = AT91SAM9G45_ID_DMA,
                .flags  = IORESOURCE_IRQ,
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
 static void __init at91_add_device_tc(void)
 {
        /* this chip has one clock and irq for all six TC channels */
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+       at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb0_device);
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+       at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb1_device);
 }
 #else
index c4c8865d52d7bfd84f5a22416ef7fe6bc1c472aa..65eb0943194f4b0ea4ea65613435ceef4d875c75 100644 (file)
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
                .start  = AT91_PIN_PC11,
                .end    = AT91_PIN_PC11,
                .flags  = IORESOURCE_IRQ
+                       | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
        }
 };
 
 static struct dm9000_plat_data dm9000_platdata = {
-       .flags          = DM9000_PLATF_16BITONLY,
+       .flags          = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
 };
 
 static struct platform_device dm9000_device = {
@@ -167,17 +168,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
 };
 
 
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
-       .wire4          = 1,
-//     .det_pin        = ... not connected
-//     .wp_pin         = ... not connected
-//     .vcc_pin        = ... not connected
-};
-
-
 /*
  * NAND flash
  */
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
        at91_add_device_nand(&ek_nand_data);
 }
 
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
 
 /*
  * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
 #endif
 };
 
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+       .wire4          = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
 
 /*
  * LCD Controller
index 7f7da439341fabc4e85b6febeb8b1f3e2cd6f4ee..7525cee3983f7252fac0542be5955a490ca6869b 100644 (file)
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
 int __init clk_register(struct clk *clk)
 {
        if (clk_is_peripheral(clk)) {
-               clk->parent = &mck;
+               if (!clk->parent)
+                       clk->parent = &mck;
                clk->mode = pmc_periph_mode;
                list_add_tail(&clk->node, &clocks);
        }
index 8bf3cec98cfadba46d8ca1816c7aff5b8a871bbf..4566bd1c8660b3fe7ac0cff28473746fd3d34c82 100644 (file)
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
        clkdev_add_table(clocks, ARRAY_SIZE(clocks));
        return 0;
 }
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
index 91931dcb068997d540dd2a133001eed458cbc81b..4aaadc753d3e6ff4e60b88c17e62e5b3c0cfb812 100644 (file)
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index a5f0174290b4eaa0ae36a1769ae4c9cb32631d3c..e064bb3d69197b8ddee286eda06dee980052712d 100644 (file)
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
        if (!otg_mode_host)
                mxc_register_device(&otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+       eukrea_mbimxsd25_baseboard_init();
 #endif
 }
 
index d3af0fdf8475f7ef0d67b3afbb080df739c36431..7a62e744a8b0fcbc5eef1da9645bf663199608f8 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
        if (aad->sel)
-               fref = fref * 2 / 3;
+               fref = fref * 3 / 4;
 
        return fref / aad->arm;
 }
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
+       unsigned long fref = get_rate_arm();
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
 
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
        return get_rate_ahb(NULL) >> 1;
 }
 
-static unsigned long get_3_3_div(unsigned long in)
-{
-       return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
 static unsigned long get_rate_uart(struct clk *clk)
 {
        unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div = get_3_3_div(pdr4 >> 10);
+       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
 
        if (pdr3 & (1 << 14))
                return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
                break;
        }
 
-       return rate / get_3_3_div(div);
+       return rate / (div + 1);
 }
 
 static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+       return rate / (((pdr2 >> 16) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+       return rate / (((pdr4 >> 22) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_ipg_per(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div1, div2;
+       unsigned long div;
 
        if (pdr0 & (1 << 26)) {
-               div1 = (pdr4 >> 19) & 0x7;
-               div2 = (pdr4 >> 16) & 0x7;
-               return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+               div = (pdr4 >> 16) & 0x3f;
+               return get_rate_arm() / (div + 1);
        } else {
-               div1 = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / div1;
+               div = (pdr0 >> 12) & 0x7;
+               return get_rate_ahb(NULL) / (div + 1);
        }
 }
 
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+       unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+       unsigned long fref = get_rate_mpll();
+
+       if (fref > 400 * 1000 * 1000) {
+               switch (hsp_podf) {
+               case 0:
+                       return fref >> 2;
+               case 1:
+                       return fref >> 3;
+               case 2:
+                       return fref / 3;
+               }
+       } else {
+               switch (hsp_podf) {
+               case 0:
+               case 2:
+                       return fref / 3;
+               case 1:
+                       return fref / 6;
+               }
+       }
+
+       return 0;
+}
+
 static int clk_cgr_enable(struct clk *clk)
 {
        u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk,   0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c2_clk,   1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c3_clk,   2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_hsp, NULL);
 DEFINE_CLOCK(kpp_clk,    0, CCM_CGR1, 20, get_rate_ipg, NULL);
 DEFINE_CLOCK(mlb_clk,    0, CCM_CGR1, 22, get_rate_ahb, NULL);
 DEFINE_CLOCK(mshc_clk,   0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int ll = 0;
+       unsigned int cgr2 = 3 << 26, cgr3 = 0;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       ll = (3 << 16);
+       cgr2 |= 3 << 16;
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
-       __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
-       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       /*
+        * Check if we came up in internal boot mode. If yes, we need some
+        * extra clocks turned on, otherwise the MX35 boot ROM code will
+        * hang after a watchdog reset.
+        */
+       if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+               /* Additionally turn on UART1, SCC, and IIM clocks */
+               cgr2 |= 3 << 16 | 3 << 4;
+               cgr3 |= 3 << 2;
+       }
+
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
 
        mxc_timer_init(&gpt_clk,
                        MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
index 1dc5004df866d4e9df7d817aca27e658573f8c6c..f8f15e3ac7a0e82a6cde9f6bd021c85a546bbae7 100644 (file)
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 9770a6a973be561fdfb67cfdda1f8cdf36381e8e..2a4f8b781ba4c60a4d66f554d860953fcafe1cf0 100644 (file)
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
        if (!otg_mode_host)
                mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+       eukrea_mbimxsd35_baseboard_init();
 #endif
 }
 
index 6af69def357f92d2f177d19d8fc7bce330ff5666..57c10a9926cc5056668645ff39fe3ac07fed9969 100644 (file)
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
 {
        u32 reg;
        reg = __raw_readl(clk->enable_reg);
-       reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
        __raw_writel(reg, clk->enable_reg);
 
 }
index 268a9bc6be8a22a4ca0ac4fed742d035753581a3..50d5939a78f1bdc8f318f08360243626a14a6105 100644 (file)
@@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 {
        int i;
        unsigned int freq;
index 27fa329d9a8b7a5677c2cf75e25900797130eb46..0a0d0fe99220d7f450e61dc04495d8dfe4492be3 100644 (file)
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
index 7139e0dc26d16062304bd72beca1f339e61899e7..4e1287070d219c32235aebadfbf2d97df3d059fb 100644 (file)
 #define GPIO46_CI_DD_7         MFP_CFG_DRV(GPIO46, AF0, DS04X)
 #define GPIO47_CI_DD_8         MFP_CFG_DRV(GPIO47, AF1, DS04X)
 #define GPIO48_CI_DD_9         MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
 #define GPIO49_CI_MCLK         MFP_CFG_DRV(GPIO49, AF0, DS04X)
 #define GPIO50_CI_PCLK         MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
 
 /* KEYPAD */
 #define GPIO3_KP_DKIN_6                MFP_CFG_LPM(GPIO3,   AF2, FLOAT)
index 5e16b4c692222a4a45d5728cec5b4970cae495e0..ae416fe7daf2e61b09f683c9be36ebc57c105c62 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o pm_runtime.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 23d472f9525e6a160c97cbf8adc21c505819fb05..95935c83c30654ee94d301e94086680475a6ed67 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
 #include <linux/sh_clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
 
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_ocr_mask  = MMC_VDD_165_195,
+       .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
 
 static struct platform_device fsi_device = {
        .name           = "sh_fsi2",
-       .id             = 0,
+       .id             = -1,
        .num_resources  = ARRAY_SIZE(fsi_resources),
        .resource       = fsi_resources,
        .dev    = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
        },
 };
 
+static struct gpio_led ap4evb_leds[] = {
+       {
+               .name                   = "led4",
+               .gpio                   = GPIO_PORT185,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led2",
+               .gpio                   = GPIO_PORT186,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led3",
+               .gpio                   = GPIO_PORT187,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led1",
+               .gpio                   = GPIO_PORT188,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+       .num_leds = ARRAY_SIZE(ap4evb_leds),
+       .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+       .name = "leds-gpio",
+       .id = 0,
+       .dev = {
+               .platform_data  = &ap4evb_leds_pdata,
+       },
+};
+
 static struct platform_device *ap4evb_devices[] __initdata = {
+       &leds_device,
        &nor_flash_device,
        &smc911x_device,
        &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_FN_CS5A,      NULL);
        gpio_request(GPIO_FN_IRQ6_39,   NULL);
 
-       /* enable LED 1 - 4 */
-       gpio_request(GPIO_PORT185, NULL);
-       gpio_request(GPIO_PORT186, NULL);
-       gpio_request(GPIO_PORT187, NULL);
-       gpio_request(GPIO_PORT188, NULL);
-       gpio_direction_output(GPIO_PORT185, 1);
-       gpio_direction_output(GPIO_PORT186, 1);
-       gpio_direction_output(GPIO_PORT187, 1);
-       gpio_direction_output(GPIO_PORT188, 1);
-       gpio_export(GPIO_PORT185, 0);
-       gpio_export(GPIO_PORT186, 0);
-       gpio_export(GPIO_PORT187, 0);
-       gpio_export(GPIO_PORT188, 0);
-
        /* enable Debug switch (S6) */
        gpio_request(GPIO_PORT32, NULL);
        gpio_request(GPIO_PORT33, NULL);
index fb4e9b1d788e464922ba2345d60fb43b8e1173d2..759468992ad287ff3f40b2f2e92e19d99734c94a 100644 (file)
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
 
 struct clk pllc2_clk = {
        .ops            = &pllc2_clk_ops,
-       .flags          = CLK_ENABLE_ON_INIT,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
        .parent_table   = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
 
 enum { MSTP001,
        MSTP131, MSTP130,
-       MSTP129, MSTP128,
+       MSTP129, MSTP128, MSTP127, MSTP126,
        MSTP118, MSTP117, MSTP116,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
        [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
        [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
        [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
        [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
        [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
        [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
index b7c705a213a2a1400e180df83d9e5a6f67649db4..6b7c7c42bc8fc529678fe7e77d01878299d049c4 100644 (file)
@@ -1,8 +1,10 @@
 /*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
  *
  * Copyright (C) 2010  Magnus Damm
  *
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644 (file)
index 0000000..94912d3
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ *  Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+       unsigned long flags;
+       struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+       struct pm_runtime_data *prd = res;
+
+       dev_dbg(dev, "__devres_release()\n");
+
+       if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+               clk_disable(prd->clk);
+
+       if (test_bit(BIT_ACTIVE, &prd->flags))
+               clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+       return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+                                    struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+               prd->clk = clk_get(dev, NULL);
+               if (!IS_ERR(prd->clk)) {
+                       set_bit(BIT_ACTIVE, &prd->flags);
+                       dev_info(dev, "clocks managed by runtime pm\n");
+               }
+       }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+                                   struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+               dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+       platform_pm_runtime_bug(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_disable(prd->clk);
+               clear_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+       platform_pm_runtime_init(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_enable(prd->clk);
+               set_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+       /* suspend synchronously to disable clocks immediately */
+       return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pm_runtime_data *prd;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       if (action == BUS_NOTIFY_BIND_DRIVER) {
+               prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+               if (prd)
+                       devres_add(dev, prd);
+               else
+                       dev_err(dev, "unable to alloc memory for runtime pm\n");
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct clk *clk;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_enable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced on\n");
+               }
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_disable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced off\n");
+               }
+               break;
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+       .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+       bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+}
+core_initcall(sh_pm_runtime_init);
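For context, a minimal sketch of how a platform driver is expected to interact with the code above: it simply brackets hardware access with runtime PM calls and lets the bus notifier and clock framework handle the clocks. The foo_probe() driver and its device are hypothetical:

    #include <linux/pm_runtime.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);

            /* Resuming enables the device clock via the callbacks above. */
            pm_runtime_get_sync(&pdev->dev);
            /* ... touch hardware ... */
            pm_runtime_put_sync(&pdev->dev);

            return 0;
    }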
index 33c3f570aaa06c2a56f6a6d70eb7f558a883b0b1..a0a2928ae4dd7670a1342863040791833b57dbab 100644 (file)
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
-       depends on CPU_V6
+       depends on CPU_V6 || CPU_V7
        default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
index c704eed63c5ddba4c5f849f7ab7b6420008cef21..4bc43e535d3baadc657df9af504078ff1ae00570 100644 (file)
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
                        }
                } while (size -= PAGE_SIZE);
 
+               dsb();
+
                return (void *)c->vm_start;
        }
        return NULL;
index 0527e65318f4a647b5b00192ce08ba47368ce5f8..6785db4179b84ccd925f9112cd48e9be71668e7c 100644 (file)
@@ -43,6 +43,7 @@ config ARCH_MXC91231
 config ARCH_MX5
        bool "MX5-based"
        select CPU_V7
+       select ARM_L1_CACHE_SHIFT_6
        help
          This enables support for systems based on the Freescale i.MX51 family
 
index 634e3f4c454df222728aa678bdcd657967286dbc..656acb45d434b7333e0864798fb6b69538387701 100644 (file)
@@ -37,9 +37,9 @@
  * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
  */
 
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
 extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
 extern void eukrea_mbimx51_baseboard_init(void);
 
 #endif
index b3da9aad4295704ef9ea8a4a43c95127d74e6d9e..3703ab28257fbbb55d3db89bee56877eebb38345 100644 (file)
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
                return -EAGAIN;
 
        for (i = 0; i < 4; i++) {
-               v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
-               __raw_writel(v, TZIC_WAKEUP0(i));
+               v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+                       wakeup_intr[i];
+               __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
        }
 
        return 0;
index 0732c6c8d511979e354cced2cd6987889702a1e7..ef32686feef9431ab00f42e4a2a0e2d7656af783 100644 (file)
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
 
 static int __devinit pwm_probe(struct platform_device *pdev)
 {
-       struct platform_device_id *id = platform_get_device_id(pdev);
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct pwm_device *pwm, *secondary = NULL;
        struct resource *r;
        int ret = 0;
index 48cbdcb6bbd4288929f31bef94da3691b160a489..55590a4d87c932984404d1df13ca4c296c9d7117 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2622,7 +2622,7 @@ kraken                    MACH_KRAKEN             KRAKEN                  2634
 gw2388                 MACH_GW2388             GW2388                  2635
 jadecpu                        MACH_JADECPU            JADECPU                 2636
 carlisle               MACH_CARLISLE           CARLISLE                2637
-lux_sf9                        MACH_LUX_SFT9           LUX_SFT9                2638
+lux_sf9                        MACH_LUX_SF9            LUX_SF9                 2638
 nemid_tb               MACH_NEMID_TB           NEMID_TB                2639
 terrier                        MACH_TERRIER            TERRIER                 2640
 turbot                 MACH_TURBOT             TURBOT                  2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr        MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
 netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
 wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
+lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
+spica                  MACH_SPICA              SPICA                   2968
+gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
+bipnet                 MACH_BIPNET             BIPNET                  2970
+overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
+davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
+pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
+ptx7545                        MACH_PTX7545            PTX7545                 2974
+tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
+omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
+flyer                  MACH_FLYER              FLYER                   2978
+tornado3240            MACH_TORNADO3240        TORNADO3240             2979
+soli_01                        MACH_SOLI_01            SOLI_01                 2980
+omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
+helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
+netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
+ssc                    MACH_SSC                SSC                     2984
+premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
+wasabi                 MACH_WASABI             WASABI                  2986
+vivow                  MACH_VIVOW              VIVOW                   2987
+mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
+universal              MACH_UNIVERSAL          UNIVERSAL               2989
+real6410               MACH_REAL6410           REAL6410                2990
+spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
+ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
+omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
+thebe                  MACH_THEBE              THEBE                   2994
+rv082                  MACH_RV082              RV082                   2995
+armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
+tjinc1000              MACH_TJINC1000          TJINC1000               2997
+dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
+ax8008                 MACH_AX8008             AX8008                  2999
+gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
+pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
+ea20                   MACH_EA20               EA20                    3002
+awm2                   MACH_AWM2               AWM2                    3003
+ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
+tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
+tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
+rubys                  MACH_RUBYS              RUBYS                   3008
+aquarius               MACH_AQUARIUS           AQUARIUS                3009
+mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
+mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
+lswxl                  MACH_LSWXL              LSWXL                   3012
+dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
+sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
+jocpu550               MACH_JOCPU550           JOCPU550                3015
+msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
+msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
+yanomami               MACH_YANOMAMI           YANOMAMI                3018
+gta04                  MACH_GTA04              GTA04                   3019
+cm_a510                        MACH_CM_A510            CM_A510                 3020
+omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
+kx33xx                 MACH_KX33XX             KX33XX                  3022
+ptx7510                        MACH_PTX7510            PTX7510                 3023
+top9000                        MACH_TOP9000            TOP9000                 3024
+teenote                        MACH_TEENOTE            TEENOTE                 3025
+ts3                    MACH_TS3                TS3                     3026
+a0                     MACH_A0                 A0                      3027
+fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
+fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
+frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
+remus                  MACH_REMUS              REMUS                   3031
+at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
+at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
+kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
+oratisrouter           MACH_ORATISROUTER       ORATISROUTER            3035
+armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
+spdm                   MACH_SPDM               SPDM                    3037
+gtib                   MACH_GTIB               GTIB                    3038
+dgm3240                        MACH_DGM3240            DGM3240                 3039
+atlas_i_lpe            MACH_ATLAS_I_LPE        ATLAS_I_LPE             3040
+htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
+tricorder              MACH_TRICORDER          TRICORDER               3042
+tx28                   MACH_TX28               TX28                    3043
+bstbrd                 MACH_BSTBRD             BSTBRD                  3044
+pwb3090                        MACH_PWB3090            PWB3090                 3045
+idea6410               MACH_IDEA6410           IDEA6410                3046
+qbc9263                        MACH_QBC9263            QBC9263                 3047
+borabora               MACH_BORABORA           BORABORA                3048
+valdez                 MACH_VALDEZ             VALDEZ                  3049
+ls9g20                 MACH_LS9G20             LS9G20                  3050
+mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
+s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
+controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
+tin307                 MACH_TIN307             TIN307                  3054
+tin510                 MACH_TIN510             TIN510                  3055
+bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
+tem3x30                        MACH_TEM3X30            TEM3X30                 3058
+harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
+msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
+spear900               MACH_SPEAR900           SPEAR900                3061
+pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
index a67aeed17d405fbc37e3a44a860c67a3a1dd9fcf..debc5ed96d6e087a2e241e47421f537a13bf1feb 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
index 50794137d710d71bfa197cbb055d14377f2cd770..675c9e11ada5541085e2fd6e6129272c05713df4 100644 (file)
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
 {
        siginfo_t info;
 
-       lock_kernel();
 #ifdef DEBUG_SPARC_BREAKPOINT
         printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
 #endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
 #ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
 #endif
-       unlock_kernel();
 }
 
 asmlinkage int
index f8514e291e1559ecda6994ff5be883cc6805128c..12b9f352595f44e26c3f5730d3a84e8557d8cca8 100644 (file)
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
        enum direction dir;
 
-       lock_kernel();
        if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
           (((insn >> 30) & 3) != 3))
                goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 kill_user:
        user_mna_trap_fault(regs, insn);
 out:
-       unlock_kernel();
+       ;
 }
index f24d298bda29d1ca0457b0b7060a366ace463756..b351770cbdd6aded05ca53554b30c8716a24a4ce 100644 (file)
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
        struct thread_info *tp = current_thread_info();
        int window;
 
-       lock_kernel();
        flush_user_windows();
        for(window = 0; window < tp->w_saved; window++) {
                unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
                        do_exit(SIGILL);
        }
        tp->w_saved = 0;
-       unlock_kernel();
 }
index f35eb45d6576258e7dba242934d76376380b531c..c4191b3b7056c6ad16563375f529d1480668a26e 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index 51cfd730ac5d145ed9f184f643c254d441dbeaab..1f99ecfc48e178312860f177160b735c2655946f 100644 (file)
@@ -152,9 +152,14 @@ struct x86_emulate_ops {
 struct operand {
        enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
        unsigned int bytes;
-       unsigned long orig_val, *ptr;
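+       /*
+        * orig_val64 gives group 9 (cmpxchg8b) emulation a full 64-bit view
+        * of the old value even when unsigned long is only 32 bits wide.
+        */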
+       union {
+               unsigned long orig_val;
+               u64 orig_val64;
+       };
+       unsigned long *ptr;
        union {
                unsigned long val;
+               u64 val64;
                char valptr[sizeof(unsigned long) + 2];
        };
 };
index 224392d8fe8c095390439a10ea45af2e21bc0930..5e975298fa819ff3ca648e647f531700edb1f408 100644 (file)
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
-       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, c->llc_shared_map);
+       cpumask_set_cpu(cpu, b->cpus);
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
index c2a8b26d4feacf4ac6b6b022c0a9c590fbbcef85..d9368eeda3090eb9f53482704e000e600b6237e3 100644 (file)
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                               unsigned int cpu)
 {
        int err;
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
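+       /* use the cpu the caller asked for; this may run on a different CPU
+        * than the one the device belongs to, so smp_processor_id() would
+        * not necessarily pick the right cpuinfo */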
 
        err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
        if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
-               err = thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
-               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                WARN_ON(err);
        }
 #ifdef CONFIG_HOTPLUG_CPU
index f2da20fda02ddf6fcd449a88ba399fe4ed44af2a..3efdf2870a3572263add749326aa5925df7c461f 100644 (file)
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown nmi */
+                       return NOTIFY_DONE;
+               /*
+                * This one is a PMU back-to-back nmi. Two events
+                * trigger 'simultaneously', raising two back-to-back
+                * NMIs. If the first NMI handles both, the latter
+                * will be empty and daze the CPU. So, we drop it to
+                * avoid false-positive 'unknown nmi' messages.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next nmi could be a back-to-back nmi */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could have two subsequent back-to-back nmis: The
+                * first handles more than one counter, the 2nd
+                * handles only one counter and the 3rd handles no
+                * counter.
+                *
+                * This is the 2nd nmi because the previous was
+                * handling more than one counter. We will mark the
+                * next (3rd) and then drop it if unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
index d8d86d01400866c6320001fb715301276747cd52..ee05c90012d269e66de9ffb6a5952d1434317c1e 100644 (file)
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
-       u64 ack, status;
+       u64 status;
+       int handled = 0;
 
        perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
        loops = 0;
 again:
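+       /*
+        * Ack only the bits we just read; a counter that overflows while we
+        * are handling these sets a fresh status bit, which the re-check at
+        * the end of this handler will pick up.
+        */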
+       intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
        }
 
        inc_irq_stat(apic_perf_irqs);
-       ack = status;
 
        intel_pmu_lbr_read();
 
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
-       if (__test_and_clear_bit(62, (unsigned long *)&status))
+       if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+               handled++;
                x86_pmu.drain_pebs(regs);
+       }
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
+               handled++;
+
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -761,8 +766,6 @@ again:
                        x86_pmu_stop(event);
        }
 
-       intel_pmu_ack_status(ack);
-
        /*
         * Repeat if there is more work to be done:
         */
@@ -772,7 +775,7 @@ again:
 
 done:
        intel_pmu_enable_all(0);
-       return 1;
+       return handled;
 }
 
 static struct event_constraint *
index 7e578e9cc58bd5062d30776d431dabdcf724ac67..b560db3305be16ff954fc416137d17b17189b5e7 100644 (file)
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                inc_irq_stat(apic_perf_irqs);
        }
 
-       return handled > 0;
+       return handled;
 }
 
 /*
index a874495b3673baeb27467d144995d885f2f94ebc..e2a5952573905b2eeac3d18060be54532dbfdfde 100644 (file)
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
        /* Copy kernel address range */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
+                       KERNEL_PGD_PTRS);
 
        /* Initialize low mappings */
        clone_pgd_range(trampoline_pg_dir,
index d632934cb6386947352650f262745eb3c93c68ce..26a863a9c2a815ec797b6211cd3de5e0b9cbed8e 100644 (file)
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void)
 
        local_irq_save(flags);
 
-       get_cpu_var(cyc2ns_offset) = 0;
+       __get_cpu_var(cyc2ns_offset) = 0;
        offset = cyc2ns_suspend - sched_clock();
 
        for_each_possible_cpu(cpu)
index b38bd8b92aa6c84ed9a00295eeaac73bade26458..66ca98aafdd6a73d7eea8834d1f1c930384b8268 100644 (file)
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       u64 old = c->dst.orig_val;
+       u64 old = c->dst.orig_val64;
 
        if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
            ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
-
                c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
                c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
                ctxt->eflags &= ~EFLG_ZF;
        } else {
-               c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
-                      (u32) c->regs[VCPU_REGS_RBX];
+               c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+                       (u32) c->regs[VCPU_REGS_RBX];
 
                ctxt->eflags |= EFLG_ZF;
        }
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
                                        c->src.valptr, c->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
-               c->src.orig_val = c->src.val;
+               c->src.orig_val64 = c->src.val64;
        }
 
        if (c->src2.type == OP_MEM) {
index 8d10c063d7f207451b087a11a8d7bf0c888f3695..4b7b73ce209894442eddd9df2b8f076408389b12 100644 (file)
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s)
                if (!found)
                        found = s->kvm->bsp_vcpu;
 
+               if (!found)
+                       return;
+
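+               /* found may still be NULL when no BSP vcpu exists yet; the
+                * check above keeps us from passing NULL to kvm_vcpu_kick() */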
                kvm_vcpu_kick(found);
        }
 }
index ffed06871c5cf389594244086ecb3685496b4258..63c314502993b33c1a53773d111eb69133668179 100644 (file)
@@ -43,7 +43,6 @@ struct kvm_kpic_state {
        u8 irr;         /* interrupt request register */
        u8 imr;         /* interrupt mask register */
        u8 isr;         /* interrupt service register */
-       u8 isr_ack;     /* interrupt ack detection */
        u8 priority_add;        /* highest irq priority */
        u8 irq_base;
        u8 read_reg_select;
@@ -56,6 +55,7 @@ struct kvm_kpic_state {
        u8 init4;               /* true if 4 byte init */
        u8 elcr;                /* PIIX edge/trigger selection */
        u8 elcr_mask;
+       u8 isr_ack;     /* interrupt ack detection */
        struct kvm_pic *pics_state;
 };
 
index 84e236ce76ba9a8afd624cfc4c506ebaa654b926..72fc70cf6184c756b1157f272b0d5e2b7bcc0609 100644 (file)
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
index f6b48f6c595176a59c8e2fe5fa145bc11acca118..cfe4faabb0f6792aebfb8330dd55f02406b148dc 100644 (file)
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (!error)
-               error = sysdev_register(&device_oprofile);
+       if (error)
+               return error;
+
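+       /* register the device; if that fails, unregister the class again so
+        * nothing is left half initialized */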
+       error = sysdev_register(&device_oprofile);
+       if (error)
+               sysdev_class_unregister(&oprofile_sysclass);
+
        return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int  init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        char *cpu_type = NULL;
        int ret = 0;
 
+       using_nmi = 0;
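+       /* using_nmi stays clear until the whole setup below has succeeded,
+        * so the exit path will not tear down state that was never set up */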
+
        if (!cpu_has_apic)
                return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       init_sysfs();
+       ret = init_sysfs();
+       if (ret)
+               return ret;
+
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
index a6809645d212d9cf970b85473e3d9c05bc4ba652..2fef1ef931a06756d364707c28d97e6c440aa484 100644 (file)
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
        /* Currently we do not support hierarchy deeper than two level (0,1) */
        if (parent != cgroup->top_cgroup)
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-EPERM);
 
        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
index ee1a1e7e63ccfc3e735566e3725febee1890bb2e..32a1c123dfb36a5fff24857221a9a4d1b79131bf 100644 (file)
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const bool sync = (bio->bi_rw & REQ_SYNC);
-       const bool unplug = (bio->bi_rw & REQ_UNPLUG);
-       const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const bool sync = !!(bio->bi_rw & REQ_SYNC);
+       const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+       const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
        if ((bio->bi_rw & REQ_HARDBARRIER) &&
index 001ab18078f5ba1b8c6f34e021cc02ef401d98b6..0749b89c68852fc824e4afa862d871814c56ddd8 100644 (file)
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(disk_to_dev(disk));
+               kobject_put(&dev->kobj);
                return ret;
        }
 
index 6e7dc87141e48230d0eb82c2bdcf770b0ac581a9..d6b911ac002cd9ef14c76535a6b3b6814243079b 100644 (file)
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 static inline int blk_cpu_to_group(int cpu)
 {
+       int group = NR_CPUS;
 #ifdef CONFIG_SCHED_MC
        const struct cpumask *mask = cpu_coregroup_mask(cpu);
-       return cpumask_first(mask);
+       group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-       return cpumask_first(topology_thread_cpumask(cpu));
+       group = cpumask_first(topology_thread_cpumask(cpu));
 #else
        return cpu;
 #endif
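+       /* cpumask_first() on an empty topology mask returns an out-of-range
+        * value; fall back to the cpu itself in that case */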
+       if (likely(group < NR_CPUS))
+               return group;
+       return cpu;
 }
 
 /*
index eb4086f7dfef9eb7efc6d202fb7a979919a551b8..f65c6f01c47580b7e24e7fc6bda1764668cf4d21 100644 (file)
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        struct cfq_group *orig_cfqg;
+       /* Number of sectors dispatched from queue in single dispatch round */
+       unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
        struct hlist_node cfqd_node;
        atomic_t ref;
 #endif
+       /* number of requests that are on the dispatch list or inside driver */
+       int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_group_idle;
        unsigned int cfq_latency;
        unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
                        &cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+       /*
+        * If we are not idling on queues and the drive supports NCQ, requests
+        * execute in parallel and measuring time is not meaningful in most
+        * cases unless we force shallower queue depths, which itself becomes
+        * a performance bottleneck. In that case switch to providing fairness
+        * in terms of number of IOs.
+        */
+       if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+               return true;
+       else
+               return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
        if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                        slice_used = cfqq->allocated_slice;
        }
 
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
        return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-       unsigned int used_sl, charge_sl;
+       unsigned int used_sl, charge;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;
 
        BUG_ON(nr_sync < 0);
-       used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+       used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-       if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-               charge_sl = cfqq->allocated_slice;
+       if (iops_mode(cfqd))
+               charge = cfqq->slice_dispatch;
+       else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+               charge = cfqq->allocated_slice;
 
        /* Can't update vdisktime while group is on service tree */
        cfq_rb_erase(&cfqg->rb_node, st);
-       cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+       cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+                       " sect=%u", used_sl, cfqq->slice_dispatch, charge,
+                       iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
@@ -1587,6 +1612,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                cfqq->allocated_slice = 0;
                cfqq->slice_end = 0;
                cfqq->slice_dispatch = 0;
+               cfqq->nr_sectors = 0;
 
                cfq_clear_cfqq_wait_request(cfqq);
                cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1865,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        BUG_ON(!service_tree);
        BUG_ON(!service_tree->count);
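+       /* slice_idle == 0 turns queue idling off entirely; group idling, if
+        * configured, is handled separately by the callers */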
 
+       if (!cfqd->cfq_slice_idle)
+               return false;
+
        /* We never do for idle class queues. */
        if (prio == IDLE_WORKLOAD)
                return false;
@@ -1863,7 +1892,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_io_context *cic;
-       unsigned long sl;
+       unsigned long sl, group_idle = 0;
 
        /*
         * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1908,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        /*
         * idle is disabled, either manually or by past process history
         */
-       if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-               return;
+       if (!cfq_should_idle(cfqd, cfqq)) {
+               /* no queue idling. Check for group idling */
+               if (cfqd->cfq_group_idle)
+                       group_idle = cfqd->cfq_group_idle;
+               else
+                       return;
+       }
 
        /*
         * still active requests from this queue, don't idle
@@ -1907,13 +1941,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                return;
        }
 
+       /* There are other queues in the group, don't do group idle */
+       if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+               return;
+
        cfq_mark_cfqq_wait_request(cfqq);
 
-       sl = cfqd->cfq_slice_idle;
+       if (group_idle)
+               sl = cfqd->cfq_group_idle;
+       else
+               sl = cfqd->cfq_slice_idle;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
        cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+                       group_idle ? 1 : 0);
 }
 
 /*
@@ -1929,9 +1971,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
        cfq_remove_request(rq);
        cfqq->dispatched++;
+       (RQ_CFQG(rq))->dispatched++;
        elv_dispatch_sort(q, rq);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+       cfqq->nr_sectors += blk_rq_sectors(rq);
        cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
                                        rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2242,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                        cfqq = NULL;
                        goto keep_queue;
                } else
-                       goto expire;
+                       goto check_group_idle;
        }
 
        /*
@@ -2226,8 +2270,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
-       if (timer_pending(&cfqd->idle_slice_timer) ||
-           (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+       if (timer_pending(&cfqd->idle_slice_timer)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       /*
+        * If group idle is enabled and there are requests dispatched from
+        * this group, wait for requests to complete.
+        */
+check_group_idle:
+       if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+           && cfqq->cfqg->dispatched) {
                cfqq = NULL;
                goto keep_queue;
        }
@@ -3375,6 +3434,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;
+       (RQ_CFQG(rq))->dispatched--;
        cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
                        rq_start_time_ns(rq), rq_io_start_time_ns(rq),
                        rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3464,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                 * the queue.
                 */
                if (cfq_should_wait_busy(cfqd, cfqq)) {
-                       cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+                       unsigned long extend_sl = cfqd->cfq_slice_idle;
+                       if (!cfqd->cfq_slice_idle)
+                               extend_sl = cfqd->cfq_group_idle;
+                       cfqq->slice_end = jiffies + extend_sl;
                        cfq_mark_cfqq_wait_busy(cfqq);
                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
@@ -3850,6 +3913,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_group_idle = cfq_group_idle;
        cfqd->cfq_latency = 1;
        cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
@@ -3922,6 +3986,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4019,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
                UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4041,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(group_idle),
        CFQ_ATTR(low_latency),
        CFQ_ATTR(group_isolation),
        __ATTR_NULL
@@ -4028,6 +4095,12 @@ static int __init cfq_init(void)
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       if (!cfq_group_idle)
+               cfq_group_idle = 1;
+#else
+       cfq_group_idle = 0;
+#endif
        if (cfq_slab_setup())
                return -ENOMEM;
 
index ec585c9554d33c04b973537f9ac3216e332afa50..205b09a5bd9ead9e64d7bcfb60cec924f1a22cbd 100644 (file)
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
        struct elevator_queue *old_elevator, *e;
        void *data;
+       int err;
 
        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
-               return 0;
+               return -ENOMEM;
 
        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
-               return 0;
+               return -ENOMEM;
        }
 
        /*
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        __elv_unregister_queue(old_elevator);
 
-       if (elv_register_queue(q))
+       err = elv_register_queue(q);
+       if (err)
                goto fail_register;
 
        /*
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
-       return 1;
+       return 0;
 
 fail_register:
        /*
@@ -1071,17 +1073,19 @@ fail_register:
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);
 
-       return 0;
+       return err;
 }
 
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-                         size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
 {
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;
 
        if (!q->elevator)
-               return count;
+               return -ENXIO;
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
-               return count;
+               return 0;
        }
 
-       if (!elevator_switch(q, e))
-               printk(KERN_ERR "elevator: switch to %s failed\n",
-                                                       elevator_name);
-       return count;
+       return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+                         size_t count)
+{
+       int ret;
+
+       if (!q->elevator)
+               return count;
+
+       ret = elevator_change(q, name);
+       if (!ret)
+               return count;
+
+       printk(KERN_ERR "elevator: switch to %s failed\n", name);
+       return ret;
 }
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
index 013727b20417226249ca942d28708164a497c2dc..ff1c945fba98e39de55b42ce46a5e9af26b34c45 100644 (file)
@@ -253,6 +253,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+       { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+       { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 3971bc0a4838235ea26f4bff0b97ec7dd3335b69..d712675d0a9657d3cc7742b74220aaf05d0fa537 100644 (file)
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
 };
 
index 666850d31df2c304b9d1409e21e20355d52f2a65..68dc6785472f86932f769029ae6046a0dffeec47 100644 (file)
@@ -1326,7 +1326,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
        /* issue the first D2H Register FIS */
        msecs = 0;
        now = jiffies;
-       if (time_after(now, deadline))
+       if (time_after(deadline, now))
                msecs = jiffies_to_msecs(deadline - now);
 
        tf.ctl |= ATA_SRST;
index c035b3d041ee1572492d7a17f56323bde0fccacd..932eaee5024527f4e617064726e39db808c17e87 100644 (file)
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
  */
 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
+       unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
        /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
         */
        ata_lpm_enable(host);
 
-       rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+        * On some hardware, the device fails to respond after being spun
+        * down for suspend.  As the device won't be used before being
+        * for suspend.  As the device won't be used before being
+        * resumed, we don't need to touch the device.  Ask EH to skip
+        * the usual stuff and proceed directly to suspend.
+        *
+        * http://thread.gmane.org/gmane.linux.ide/46764
+        */
+       if (mesg.event == PM_EVENT_SUSPEND)
+               ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
        if (rc == 0)
                host->dev->power.power_state = mesg;
        return rc;
index c9ae299b83428d039c13cce845e7acc497bcff13..e48302eae55fcd2ad619564c9cd8237c722efb29 100644 (file)
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
+       /* skip if explicitly requested */
+       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+               return 1;
+
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
index 3b82d8ef76f0ffc81e6bb62e9d3ea19d1be57195..e30c537cce32f99ab7fcb07f7f124cd920c4cd4b 100644 (file)
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                if (ioaddr->ctl_addr)
                        iowrite8(tf->ctl, ioaddr->ctl_addr);
                ap->last_ctl = tf->ctl;
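+               /* wait for the device to go non-busy after the control
+                * register write before loading the rest of the taskfile */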
+               ata_wait_idle(ap);
        }
 
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                iowrite8(tf->device, ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }
+
+       ata_wait_idle(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                     u8 status, int in_wq)
 {
-       struct ata_eh_info *ehi = &ap->link.eh_info;
+       struct ata_link *link = qc->dev->link;
+       struct ata_eh_info *ehi = &link->eh_info;
        unsigned long flags = 0;
        int poll_next;
 
@@ -1298,8 +1302,14 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
 {
+       struct ata_port *ap = link->ap;
+
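+       /* only one link per port may have a PIO task pending at a time;
+        * remember which link it is so the task handler can find its qc */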
+       WARN_ON((ap->sff_pio_task_link != NULL) &&
+               (ap->sff_pio_task_link != link));
+       ap->sff_pio_task_link = link;
+
        /* may fail if ata_sff_flush_pio_task() in progress */
        queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
                           msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
 {
        struct ata_port *ap =
                container_of(work, struct ata_port, sff_pio_task.work);
+       struct ata_link *link = ap->sff_pio_task_link;
        struct ata_queued_cmd *qc;
        u8 status;
        int poll_next;
 
+       BUG_ON(ap->sff_pio_task_link == NULL);
        /* qc can be NULL if timeout occurred */
-       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (!qc)
+       qc = ata_qc_from_tag(ap, link->active_tag);
+       if (!qc) {
+               ap->sff_pio_task_link = NULL;
                return;
+       }
 
 fsm_start:
        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
                msleep(2);
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
-                       ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+                       ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
                        return;
                }
        }
 
+       /*
+        * hsm_move() may trigger another command to be processed.
+        * Clear the link reference beforehand.
+        */
+       ap->sff_pio_task_link = NULL;
        /* move the HSM */
        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
 
@@ -1376,6 +1395,7 @@ fsm_start:
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* Use polling pio if the LLD doesn't handle
         * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                ap->hsm_task_state = HSM_ST_LAST;
 
                if (qc->tf.flags & ATA_TFLAG_POLLING)
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                break;
 
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        /* PIO data out protocol */
                        ap->hsm_task_state = HSM_ST_FIRST;
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                        /* always send first data block using the
                         * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                        ap->hsm_task_state = HSM_ST;
 
                        if (qc->tf.flags & ATA_TFLAG_POLLING)
-                               ata_sff_queue_pio_task(ap, 0);
+                               ata_sff_queue_pio_task(link, 0);
 
                        /* if polling, ata_sff_pio_task() handles the
                         * rest.  otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                /* send cdb by polling if no cdb interrupt */
                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
                    (qc->tf.flags & ATA_TFLAG_POLLING))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* defer PIO handling to sff_qc_issue */
        if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 
                /* send cdb by polling if no cdb interrupt */
                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
index ba43f0f8c880a21aa1b8b1d3b9ae3d8c3487d415..2215632e4b317684a818b0ee5cbb7037fe064079 100644 (file)
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
        /* Odd numbered device ids are the units with enable bits (the -R cards) */
-       if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+       if ((pdev->device & 1) &&
+           !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
                return -ENOENT;
 
        return ata_sff_prereset(link, deadline);
index 5e659885de162e7ef3cf38e3e1f8a67098108712..ac8d7d97e4085d4122b33fbd58bde6951f3ef2e7 100644 (file)
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                        tf->lbam,
                        tf->lbah);
        }
+
+       ata_wait_idle(ap);
 }
 
 static int via_port_start(struct ata_port *ap)
index 81982594a014b603aa2ebd8b5944ed13049c929b..a9fd9709c2627c7514fe74a8ec9ebc26f09b44f9 100644 (file)
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
        }
 
        if (qc->tf.flags & ATA_TFLAG_POLLING)
-               ata_sff_queue_pio_task(ap, 0);
+               ata_sff_queue_pio_task(link, 0);
        return 0;
 }
 
index 31064df1370a96320f548d1ce5f191eaf2e97051..6124c2fd2d33440f022aa45b369bd7fb7a850c8a 100644 (file)
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
+       if (h->Qdepth > h->maxQsinceinit)
+               h->maxQsinceinit = h->Qdepth;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
 }
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        h->scatter_list = kmalloc(h->max_commands *
                                                sizeof(struct scatterlist *),
                                                GFP_KERNEL);
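+       /* bail out on allocation failure instead of dereferencing a NULL
+        * scatter_list in the loop below */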
+       if (!h->scatter_list)
+               goto clean4;
+
        for (k = 0; k < h->nr_cmds; k++) {
                h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
                                                        h->maxsgentries,
index f3c636d237187df21879c0e8acc4a4c943e59ad2..91797bbbe702f01afc5770f45618487ab21054da 100644 (file)
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
+               bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
                struct file *file = lo->lo_backing_file;
 
                if (barrier) {
index b82c5ce5e9dfaf3bcc1d81b68554de8cdff0493c..76fa3deaee84059d4431f7763981311210f16f06 100644 (file)
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
        host->breq->queuedata = host;
 
        /* mflash is a random-access device, thanks for the noop */
-       elevator_exit(host->breq->elevator);
-       err = elevator_init(host->breq, "noop");
+       err = elevator_change(host->breq, "noop");
        if (err) {
                printk(KERN_ERR "%s:%d (elevator_init) fail\n",
                                __func__, __LINE__);
index 2bbeaaea46e9b7765ce983374fb125b7dd5422c9..38df8c19e74cc56903d5985cdbee7d52df3dc0d9 100644 (file)
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KIOCSOUND:
                if (!perm)
                        goto eperm;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
+               /*
+                * The use of PIT_TICK_RATE is historic; between 2.6.12
+                * and 2.6.36 this was the platform-dependent
+                * CLOCK_TICK_RATE, which made for a minor but
+                * unfortunate ABI change.
+                */
                if (arg)
-                       arg = CLOCK_TICK_RATE / arg;
+                       arg = PIT_TICK_RATE / arg;
                kd_mksound(arg, 0);
                break;
 
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 */
                ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
                count = ticks ? (arg & 0xffff) : 0;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
                if (count)
-                       count = CLOCK_TICK_RATE / count;
+                       count = PIT_TICK_RATE / count;
                kd_mksound(count, ticks);
                break;
        }
index b42f42ca70c3c9454bb00ff7cf2e8505f74e482d..823559ab0e243610ca8e5ff3b5983aca3d53b7bb 100644 (file)
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
        return err;
 }
 
-static int sx150x_init_hw(struct sx150x_chip *chip,
-                       struct sx150x_platform_data *pdata)
+static int sx150x_reset(struct sx150x_chip *chip)
 {
-       int err = 0;
+       int err;
 
-       err = i2c_smbus_write_word_data(chip->client,
+       err = i2c_smbus_write_byte_data(chip->client,
                                        chip->dev_cfg->reg_reset,
-                                       0x3412);
+                                       0x12);
        if (err < 0)
                return err;
 
+       err = i2c_smbus_write_byte_data(chip->client,
+                                       chip->dev_cfg->reg_reset,
+                                       0x34);
+       return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+                       struct sx150x_platform_data *pdata)
+{
+       int err = 0;
+
+       if (pdata->reset_during_probe) {
+               err = sx150x_reset(chip);
+               if (err < 0)
+                       return err;
+       }
+
        err = sx150x_i2c_write(chip->client,
                        chip->dev_cfg->reg_misc,
                        0x01);
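
The reset added above is issued as two byte writes (0x12, then 0x34) to the reset register instead of the previous single 16-bit write, and it only runs when the platform opts in. A hypothetical board file wiring up that opt-in (values are illustrative; only reset_during_probe and gpio_base are assumed to be real fields of the driver's platform data):

        #include <linux/i2c/sx150x.h>

        /* Illustrative platform data enabling the probe-time soft reset. */
        static struct sx150x_platform_data board_expander_pdata = {
                .gpio_base          = 200,      /* example GPIO range start */
                .reset_during_probe = true,     /* runs sx150x_reset() before init */
        };
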
index 59457e83b011aa3bbac119faf6836a66df7affa5..744225ebb4b25d5988fab454441de95d7db94115 100644 (file)
@@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
                i915_seqno_passed(i915_get_gem_seqno(dev,
                                &dev_priv->render_ring),
                        i915_get_tail_request(dev)->seqno)) {
+               bool missed_wakeup = false;
+
                dev_priv->hangcheck_count = 0;
 
                /* Issue a wake-up to catch stuck h/w. */
-               if (dev_priv->render_ring.waiting_gem_seqno |
-                   dev_priv->bsd_ring.waiting_gem_seqno) {
-                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-                       if (dev_priv->render_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-                       if (dev_priv->bsd_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+               if (dev_priv->render_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+                       missed_wakeup = true;
+               }
+
+               if (dev_priv->bsd_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       missed_wakeup = true;
                }
+
+               if (missed_wakeup)
+                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
                return;
        }
 
index d094e91292234507c82ea57cb57947a019318aa6..4f5e15577e89e3e6f7005cd92f9f87ab0636b4eb 100644 (file)
 #define  WM1_LP_SR_EN          (1<<31)
 #define  WM1_LP_LATENCY_SHIFT  24
 #define  WM1_LP_LATENCY_MASK   (0x7f<<24)
+#define  WM1_LP_FBC_LP1_MASK   (0xf<<20)
+#define  WM1_LP_FBC_LP1_SHIFT  20
 #define  WM1_LP_SR_MASK                (0x1ff<<8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0x3f)
+#define WM2_LP_ILK             0x4510c
+#define  WM2_LP_EN             (1<<31)
+#define WM3_LP_ILK             0x45110
+#define  WM3_LP_EN             (1<<31)
+#define WM1S_LP_ILK            0x45120
+#define  WM1S_LP_EN            (1<<31)
 
 /* Memory latency timer register */
 #define MLTR_ILK               0x11222
index 40cc5da264a9bdf5520909ba39ca2cb21c1ca5ca..19daead5b525d5d3d653e156ce2bfe532479bd9e 100644 (file)
@@ -2767,14 +2767,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
-       if (wm_size <= 0) {
+       if (wm_size <= 0)
                wm_size = wm->default_wm;
-               DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
-                         " entries required = %ld, available = %lu.\n",
-                         entries_required + wm->guard_size,
-                         wm->fifo_size);
-       }
-
        return wm_size;
 }
 
@@ -3388,8 +3382,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
                reg_value = I915_READ(WM1_LP_ILK);
                reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
                               WM1_LP_CURSOR_MASK);
-               reg_value |= WM1_LP_SR_EN |
-                            (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+               reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
                             (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
 
                I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5668,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                        I915_WRITE(DISP_ARB_CTL,
                                        (I915_READ(DISP_ARB_CTL) |
                                                DISP_FBC_WM_DIS));
+               I915_WRITE(WM3_LP_ILK, 0);
+               I915_WRITE(WM2_LP_ILK, 0);
+               I915_WRITE(WM1_LP_ILK, 0);
                }
                /*
                 * Based on the document from hardware guys the following bits
@@ -5696,8 +5692,7 @@ void intel_init_clock_gating(struct drm_device *dev)
                                   ILK_DPFC_DIS2 |
                                   ILK_CLK_FBC);
                }
-               if (IS_GEN6(dev))
-                       return;
+               return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
                I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5753,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                                OUT_RING(MI_FLUSH);
                                ADVANCE_LP_RING();
                        }
-               } else {
+               } else
                        DRM_DEBUG_KMS("Failed to allocate render context."
-                                     "Disable RC6\n");
-                       return;
-               }
+                                      "Disable RC6\n");
        }
 
        if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
index b819c10811470775b19826e407ba75fffd6a9b68..4fbb0165b26f06a24cdbd8eef71f3c3d8e9845a2 100644 (file)
@@ -875,8 +875,6 @@ void intel_lvds_init(struct drm_device *dev)
 
        intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
        intel_encoder->crtc_mask = (1 << 1);
-       if (IS_I965G(dev))
-               intel_encoder->crtc_mask |= (1 << 0);
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
index 7580f55e67e3cf1560437b428d9fb1e5b8e411d0..36e95753223059ab0e1b5ed8490fe7364b39673e 100644 (file)
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+       AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+       AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index 8f0caf7d4482079ef45aa9ea3b8a66d6a37500d4..78fbe9ffe7f024f3f4e1ca486bcbeb5976087e27 100644 (file)
@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
index 443cea55daac5973469cf43fabe2a388abeadb55..61e0efd4ccfb5d9d4d6f6bdc50f765d365c07d0e 100644 (file)
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
        struct iw_cm_id *cm_id = cm_node->cm_id;
-       switch (cm_node->state) {
+       enum nes_cm_node_state state = cm_node->state;
+       cm_node->state = NES_CM_STATE_CLOSED;
+       switch (state) {
        case NES_CM_STATE_SYN_RCVD:
        case NES_CM_STATE_CLOSING:
                rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
        case NES_CM_STATE_FIN_WAIT1:
                if (cm_node->cm_id)
                        cm_id->rem_ref(cm_id);
-               cm_node->state = NES_CM_STATE_CLOSED;
                send_reset(cm_node, NULL);
                break;
        default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
                passive_state = atomic_add_return(1, &cm_node->passive_state);
-               if (passive_state ==  NES_SEND_RESET_EVENT)
-                       create_event(cm_node, NES_CM_EVENT_RESET);
-               cm_node->state = NES_CM_STATE_CLOSED;
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
        case NES_CM_STATE_CLOSED:
                drop_packet(skb);
                break;
+       case NES_CM_STATE_FIN_WAIT2:
        case NES_CM_STATE_FIN_WAIT1:
        case NES_CM_STATE_LAST_ACK:
                cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                return -EINVAL;
        }
 
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == NES_SEND_RESET_EVENT) {
+               rem_ref_cm_node(cm_node->cm_core, cm_node);
+               return -ECONNRESET;
+       }
+
        /* associate the node with the QP */
        nesqp->cm_node = (void *)cm_node;
        cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
                        "ret=%d\n", __func__, __LINE__, ret);
 
-       passive_state = atomic_add_return(1, &cm_node->passive_state);
-       if (passive_state == NES_SEND_RESET_EVENT)
-               create_event(cm_node, NES_CM_EVENT_RESET);
        return 0;
 }
 
index f8233c851c694862d76861e515b93183b8d387a5..1980a461c49904e93102e02e655e5b033c5f92be 100644 (file)
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                return; /* Ignore it, wait for close complete */
 
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+                               if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+                                       (nesqp->ibqp_state == IB_QPS_RTS) &&
+                                       ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+                                       spin_lock_irqsave(&nesqp->lock, flags);
+                                       nesqp->hw_iwarp_state = iwarp_state;
+                                       nesqp->hw_tcp_state = tcp_state;
+                                       nesqp->last_aeq = async_event_id;
+                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+                                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+                                       spin_unlock_irqrestore(&nesqp->lock, flags);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                                       nes_cm_disconn(nesqp);
+                               }
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
                                                NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
-
                        break;
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
                        if (nesqp->term_flags) {
index aa9183db32b104aaaa7bfad081c3c969699cec72..1204c3432b6322f23518c42d550746f19d9e4ce6 100644 (file)
@@ -45,6 +45,7 @@
 #define NES_PHY_TYPE_KR               9
 
 #define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
 
 enum pci_regs {
        NES_INT_STAT = 0x0000,
index 6dfdd49cdbcf36ef5cd68aee3caf46dbdaeb77f0..10560c796fd6c0ffc591601c610579d3e1b6e8ca 100644 (file)
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 0;
        } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
                u32temp = nes_read_indexed(nesdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 1;
        }
        if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
index a9b025f4147a0692845d2407b6efbd9220f9837e..ab6982056518e3c086c57738f360574b211108aa 100644 (file)
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
  *
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
  */
 int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
 {
+       int i;
+
        if (!num_slots)
                return 0;
 
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
        dev->mtsize = num_slots;
        input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
 
+       /* Mark slots as 'unused' */
+       for (i = 0; i < num_slots; i++)
+               dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
        return 0;
 }
 EXPORT_SYMBOL(input_mt_create_slots);
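
With ABS_MT_TRACKING_ID == -1 now denoting an unused slot, a driver using the slots protocol reports contacts roughly as in this sketch (slot numbers and the id value are illustrative):

        #include <linux/input.h>

        /* Report one active contact in slot 0 and release slot 1. */
        static void example_mt_report(struct input_dev *dev)
        {
                input_mt_slot(dev, 0);
                input_report_abs(dev, ABS_MT_TRACKING_ID, 42);  /* contact present */
                input_mt_slot(dev, 1);
                input_report_abs(dev, ABS_MT_TRACKING_ID, -1);  /* slot unused again */
                input_sync(dev);
        }
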
index ea67c49146a3a03280ee8719c362c41d8033c743..b952317639116f2f18a7bc1f41ff5887c17f2a49 100644 (file)
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
                               const struct bcm5974_config *cfg,
                               const struct tp_finger *f)
 {
-       input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
-       input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
-       input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
-       input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                        raw2int(f->force_major) << 1);
+       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                        raw2int(f->force_minor) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+                        raw2int(f->size_major) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MINOR,
+                        raw2int(f->size_minor) << 1);
        input_report_abs(input, ABS_MT_ORIENTATION,
                         MAX_FINGER_ORIENTATION - raw2int(f->orientation));
        input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
index 46e4ba0b92463184d5e398345e50ca68809628b2..f585131604806f531f91c48ebb867919339fd80f 100644 (file)
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_driver_unregister(&i8042_driver);
        platform_device_unregister(i8042_platform_device);
+       platform_driver_unregister(&i8042_driver);
        i8042_platform_exit();
 
        panic_blink = NULL;
index 40d77ba8fdc138ff98b0320b877358a5302d7a28..6e29badb969e44192be85080caf801851c3d06fa 100644 (file)
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
                        if (features->type == WACOM_G4 ||
                                        features->type == WACOM_MO) {
                                input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+                               rw = (data[7] & 0x04) - (data[7] & 0x03);
                        } else {
                                input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
+                               rw = -(signed char)data[6];
                        }
                        input_report_rel(input, REL_WHEEL, rw);
                }
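
The cast change above matters because the raw byte has to be sign-extended before negation; casting the already-promoted value to plain (signed) keeps it positive. A standalone illustration of the difference:

        /* With raw = 0xff: the old cast yields -255, the new one yields +1. */
        static void wheel_cast_demo(void)
        {
                unsigned char raw = 0xff;
                int old_way = -(signed)raw;             /* -(int)255 == -255 */
                int new_way = -(signed char)raw;        /* -(int)(-1) ==  +1 */

                (void)old_way;
                (void)new_way;
        }
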
index bd2755e8d9a3d327f7ae54bedd0e7820f85ea5fe..f332c52968b75d7528ee8c5f21eaf561a76d373d 100644 (file)
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
                goto err;
        }
 
-       err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
-       if (!err) {
+       if (ocr & R4_MEMORY_PRESENT
+           && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
                card->type = MMC_TYPE_SD_COMBO;
 
                if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
index 5f3a599ead07bbdfae11f7abc0198285e743db81..87226cd202a5086f7d90699f0a43d6d4e99725a1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/clk.h>
 #include <linux/atmel_pdc.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <linux/mmc/host.h>
 
index 9a68ff4353a2e83878fce5429afe9351140daf57..5a950b16d9e629dc3d08041bda547b165cadee76 100644 (file)
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
 
                while (delay--) {
                        reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN)
+                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
                                /* Check twice before cut */
                                reg = readw(host->base + MMC_REG_STATUS);
                                if (reg & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;
+                       }
 
                        if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
index 4a8776f8afdd690048c69de91e755d35d2c884a5..4526d2791f2990229acbe9ef0f5c88286819807f 100644 (file)
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
        int ret = 0;
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
-       pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
        if (host && host->suspended)
                return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
                        }
                }
                cancel_work_sync(&host->mmc_carddetect_work);
-               mmc_host_enable(host->mmc);
                ret = mmc_suspend_host(host->mmc);
+               mmc_host_enable(host->mmc);
                if (ret == 0) {
                        omap_hsmmc_disable_irq(host);
                        OMAP_HSMMC_WRITE(host->base, HCTL,
index 2e16e0a90a5e1a5d8d3d7487727baa39d701b8fb..976330de379ecc78cbe91f19c4bd9495d4722dae 100644 (file)
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
        host->pio_active        = XFER_NONE;
 
 #ifdef CONFIG_MMC_S3C_PIODMA
-       host->dodma             = host->pdata->dma;
+       host->dodma             = host->pdata->use_dma;
 #endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index ee7d0a5a51c496cb92b04e7b9bc8172f8a233875..69d98e3bf6abaa3c784d1d70f171387b3142eb64 100644 (file)
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
+       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
-             host->sg_off);
+       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+       buf = (unsigned short *)(sg_virt + host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(host, &flags);
+       tmio_mmc_kunmap_atomic(sg_virt, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index 64f7d5dfc106ac7b39e37842c5eca9a898dbbb06..0fedc78e3ea5c4613767d7d31e534143b4bf1780 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               u32 mask;\
-               mask  = sd_ctrl_read32((host), CTL_STATUS); \
-               mask &= ~((i) & TMIO_MASK_IRQ); \
-               sd_ctrl_write32((host), CTL_STATUS, mask); \
+               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
        } while (0)
 
 
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
        unsigned long *flags)
 {
-       struct scatterlist *sg = host->sg_ptr;
-
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
        unsigned long *flags)
 {
-       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
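
The reworked helpers make the unmap operate on the virtual address kmap_atomic() actually returned, instead of passing sg_page(host->sg_ptr), which is a page pointer rather than the mapped address kunmap_atomic() expects. A minimal usage sketch mirroring the PIO path above (function name hypothetical):

        /* Map the current sg entry, do the PIO copy, unmap the same pointer. */
        static void example_pio_copy(struct tmio_mmc_host *host)
        {
                unsigned long flags;
                void *sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);

                /* ... transfer to/from sg_virt + host->sg_off ... */

                tmio_mmc_kunmap_atomic(sg_virt, &flags);
        }
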
 
index a045559c81cf09e37bce9dbef9c56f1325523269..85671adae455dc59bd29405aa2b196e5b0ebd9c2 100644 (file)
@@ -1994,10 +1994,9 @@ vortex_error(struct net_device *dev, int status)
                }
        }
 
-       if (status & RxEarly) {                         /* Rx early is unused. */
-               vortex_rx(dev);
+       if (status & RxEarly)                           /* Rx early is unused. */
                iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
-       }
+
        if (status & StatsFull) {                       /* Empty statistics. */
                static int DoneDidThat;
                if (vortex_debug > 4)
@@ -2298,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id)
                if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
                        if (status == 0xffff)
                                break;
+                       if (status & RxEarly)
+                               vortex_rx(dev);
+                       spin_unlock(&vp->window_lock);
                        vortex_error(dev, status);
+                       spin_lock(&vp->window_lock);
+                       window_set(vp, 7);
                }
 
                if (--work_done < 0) {
index 37617abc164769aa5f8be38a797ea4337a3585e0..1e620e287ae0cc9fbe9894a105fedbfdc9fbb6be 100644 (file)
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
+       if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
+               bp->istat &= ~ISTAT_RFO;
+               b44_disable_ints(bp);
+               ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+               b44_init_rings(bp);
+               b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+               netif_wake_queue(bp->dev);
+       }
+
        spin_unlock_irqrestore(&bp->lock, flags);
 
        work_done = 0;
index 99197bd54da558ef26cf8a62a54af19aaa02e0f8..53306bf3f401bee193fc89f5c6c4d1b35759ecb0 100644 (file)
@@ -181,6 +181,7 @@ struct be_drvr_stats {
        u64 be_rx_bytes_prev;
        u64 be_rx_pkts;
        u32 be_rx_rate;
+       u32 be_rx_mcast_pkt;
        /* number of non ether type II frames dropped where
         * frame len > length field of Mac Hdr */
        u32 be_802_3_dropped_frames;
index 3d305494a6066fb987510abebe34a1332f80c114..34abcc9403d6b76428416412904b4c06ff8d593b 100644 (file)
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
-                       BUG_ON(!is_link_state_evt(compl->flags));
-
-                       /* Interpret compl as a async link evt */
-                       be_async_link_state_process(adapter,
+                       if (is_link_state_evt(compl->flags))
+                               be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                *status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 
                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
-                       be_dump_ue(adapter);
+                       be_detect_dump_ue(adapter);
                        return -1;
                }
 
index bdc10a28cfda9feb11ffdecee141a6e333a9e92d..ad1e6fac60c58869e074609cee3e363672bfecd9 100644 (file)
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 extern int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
 
index cd16243c7c364a858d849ac3f70ac10bbbfcb966..13f0abbc520550b0b22ef48d0ed2a4da56d69187 100644 (file)
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(be_rx_events)},
        {DRVSTAT_INFO(be_tx_compl)},
        {DRVSTAT_INFO(be_rx_compl)},
+       {DRVSTAT_INFO(be_rx_mcast_pkt)},
        {DRVSTAT_INFO(be_ethrx_post_fail)},
        {DRVSTAT_INFO(be_802_3_dropped_frames)},
        {DRVSTAT_INFO(be_802_3_malformed_frames)},
index 5d38046402b235d255b529bb96c3cd07806fe3a9..a2ec5df0d73340bf82e45ab3d50f25402fcaa9b6 100644 (file)
 #define FLASH_FCoE_BIOS_START_g3           (13631488)
 #define FLASH_REDBOOT_START_g3             (262144)
 
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET              0
+#define BE_MULTICAST_PACKET            1
+#define BE_BROADCAST_PACKET            2
+#define BE_RSVD_PACKET                 3
 
 /*
  * BE descriptors: host memory data structures whose formats
index 74e146f470c60e9df5ff01806623a0aaaaa0ec82..6eda7a02225623943a35293cada545b8d20d752b 100644 (file)
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
        dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
        dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
        dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+       dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
 
        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
        /* no space available in linux */
        dev_stats->tx_dropped = 0;
 
-       dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;
 
        /* detailed tx_errors */
@@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
 }
 
 static void be_rx_stats_update(struct be_adapter *adapter,
-               u32 pktsize, u16 numfrags)
+               u32 pktsize, u16 numfrags, u8 pkt_type)
 {
        struct be_drvr_stats *stats = drvr_stats(adapter);
 
@@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
        stats->be_rx_pkts++;
+
+       if (pkt_type == BE_MULTICAST_PACKET)
+               stats->be_rx_mcast_pkt++;
 }
 
 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
+       u8 pkt_type;
 
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
 
        page_info = get_rx_page_info(adapter, rxq_idx);
 
@@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
        BUG_ON(j > MAX_SKB_FRAGS);
 
 done:
-       be_rx_stats_update(adapter, pktsize, num_rcvd);
+       be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
 }
 
 /* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
+       u8 pkt_type;
 
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
 
        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
@@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }
 
-       be_rx_stats_update(adapter, pkt_size, num_rcvd);
+       be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
 }
 
 static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        return 1;
 }
 
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
-       u32 online0 = 0, online1 = 0;
-
-       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
-       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
-       if (!online0 || !online1) {
-               adapter->ue_detected = true;
-               dev_err(&adapter->pdev->dev,
-                       "UE Detected!! online0=%d online1=%d\n",
-                       online0, online1);
-               return true;
-       }
-
-       return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
 {
        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
        u32 i;
@@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter)
        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
 
+       if (ue_status_lo || ue_status_hi) {
+               adapter->ue_detected = true;
+               dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+       }
+
        if (ue_status_lo) {
                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
                        if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work)
                adapter->rx_post_starved = false;
                be_post_rx_frags(adapter);
        }
-       if (!adapter->ue_detected) {
-               if (be_detect_ue(adapter))
-                       be_dump_ue(adapter);
-       }
+       if (!adapter->ue_detected)
+               be_detect_dump_ue(adapter);
 
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
index 2cc4cfc31892cd85458dec20b6c46401fdb90d9b..3b16f62d5606c741e97fb0a7c7b0b211c903157d 100644 (file)
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         *       so it can wait
         */
        bond_for_each_slave(bond, slave, i) {
+               unsigned long trans_start = dev_trans_start(slave->dev);
+
                if (slave->link != BOND_LINK_UP) {
-                       if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
-                           time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+                       if (time_in_range(jiffies,
+                               trans_start - delta_in_ticks,
+                               trans_start + delta_in_ticks) &&
+                           time_in_range(jiffies,
+                               slave->dev->last_rx - delta_in_ticks,
+                               slave->dev->last_rx + delta_in_ticks)) {
 
                                slave->link  = BOND_LINK_UP;
                                slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                         * when the source ip is 0, so don't take the link down
                         * if we don't know our ip yet
                         */
-                       if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
-                           (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+                       if (!time_in_range(jiffies,
+                               trans_start - delta_in_ticks,
+                               trans_start + 2 * delta_in_ticks) ||
+                           !time_in_range(jiffies,
+                               slave->dev->last_rx - delta_in_ticks,
+                               slave->dev->last_rx + 2 * delta_in_ticks)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 {
        struct slave *slave;
        int i, commit = 0;
+       unsigned long trans_start;
 
        bond_for_each_slave(bond, slave, i) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                if (slave->link != BOND_LINK_UP) {
-                       if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
-                                          delta_in_ticks)) {
+                       if (time_in_range(jiffies,
+                               slave_last_rx(bond, slave) - delta_in_ticks,
+                               slave_last_rx(bond, slave) + delta_in_ticks)) {
+
                                slave->new_link = BOND_LINK_UP;
                                commit++;
                        }
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * active.  This avoids bouncing, as the last receive
                 * times need a full ARP monitor cycle to be updated.
                 */
-               if (!time_after_eq(jiffies, slave->jiffies +
-                                  2 * delta_in_ticks))
+               if (time_in_range(jiffies,
+                                 slave->jiffies - delta_in_ticks,
+                                 slave->jiffies + 2 * delta_in_ticks))
                        continue;
 
                /*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                if (slave->state == BOND_STATE_BACKUP &&
                    !bond->current_arp_slave &&
-                   time_after(jiffies, slave_last_rx(bond, slave) +
-                              3 * delta_in_ticks)) {
+                   !time_in_range(jiffies,
+                       slave_last_rx(bond, slave) - delta_in_ticks,
+                       slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * - (more than 2*delta since receive AND
                 *    the bond has an IP address)
                 */
+               trans_start = dev_trans_start(slave->dev);
                if ((slave->state == BOND_STATE_ACTIVE) &&
-                   (time_after_eq(jiffies, dev_trans_start(slave->dev) +
-                                   2 * delta_in_ticks) ||
-                     (time_after_eq(jiffies, slave_last_rx(bond, slave)
-                                    + 2 * delta_in_ticks)))) {
+                   (!time_in_range(jiffies,
+                       trans_start - delta_in_ticks,
+                       trans_start + 2 * delta_in_ticks) ||
+                    !time_in_range(jiffies,
+                       slave_last_rx(bond, slave) - delta_in_ticks,
+                       slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 {
        struct slave *slave;
        int i;
+       unsigned long trans_start;
 
        bond_for_each_slave(bond, slave, i) {
                switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                        continue;
 
                case BOND_LINK_UP:
+                       trans_start = dev_trans_start(slave->dev);
                        if ((!bond->curr_active_slave &&
-                            time_before_eq(jiffies,
-                                           dev_trans_start(slave->dev) +
-                                           delta_in_ticks)) ||
+                            time_in_range(jiffies,
+                                          trans_start - delta_in_ticks,
+                                          trans_start + delta_in_ticks)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
                                bond->current_arp_slave = NULL;
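
All of the bonding hunks above replace one-sided time_before_eq()/time_after_eq() tests with bounded windows, so a trans_start or last_rx timestamp that has ended up ahead of jiffies can no longer keep a link-state decision stuck. For reference, the helper's semantics amount to the sketch below (equivalent to the <linux/jiffies.h> definition):

        /* true iff a <= now <= b, using wraparound-safe jiffies comparisons */
        #define example_time_in_range(now, a, b)        \
                (time_after_eq(now, a) && time_before_eq(now, b))
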
index b4fb07a6f13ffd489c957816eebb4f831157ccd1..51919fcd50c26e2c0c6b8c23ba393887eca50254 100644 (file)
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                ks8851_wrreg16(ks, KS_RXQCR,
                               ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
 
-               if (rxlen > 0) {
-                       skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
-                       if (!skb) {
-                               /* todo - dump frame and move on */
-                       }
+               if (rxlen > 4) {
+                       unsigned int rxalign;
+
+                       rxlen -= 4;
+                       rxalign = ALIGN(rxlen, 4);
+                       skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+                       if (skb) {
 
-                       /* two bytes to ensure ip is aligned, and four bytes
-                        * for the status header and 4 bytes of garbage */
-                       skb_reserve(skb, 2 + 4 + 4);
+                               /* 4 bytes of status header + 4 bytes of
+                                * garbage: we put them before ethernet
+                                * header, so that they are copied,
+                                * but ignored.
+                                */
 
-                       rxpkt = skb_put(skb, rxlen - 4) - 8;
+                               rxpkt = skb_put(skb, rxlen) - 8;
 
-                       /* align the packet length to 4 bytes, and add 4 bytes
-                        * as we're getting the rx status header as well */
-                       ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+                               ks8851_rdfifo(ks, rxpkt, rxalign + 8);
 
-                       if (netif_msg_pktdata(ks))
-                               ks8851_dbg_dumpkkt(ks, rxpkt);
+                               if (netif_msg_pktdata(ks))
+                                       ks8851_dbg_dumpkkt(ks, rxpkt);
 
-                       skb->protocol = eth_type_trans(skb, ks->netdev);
-                       netif_rx(skb);
+                               skb->protocol = eth_type_trans(skb, ks->netdev);
+                               netif_rx(skb);
 
-                       ks->netdev->stats.rx_packets++;
-                       ks->netdev->stats.rx_bytes += rxlen - 4;
+                               ks->netdev->stats.rx_packets++;
+                               ks->netdev->stats.rx_bytes += rxlen;
+                       }
                }
 
                ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
index bc695d53cdccbcca3eb1c2ccb30018acb3c8bde6..fe6983af6918fe37cdb5589907346557ddeeb8df 100644 (file)
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
        struct niu_parent *parent = np->parent;
        struct niu_tcam_entry *tp;
        int i, idx, cnt;
-       u16 n_entries;
        unsigned long flags;
-
+       int ret = 0;
 
        /* put the tcam size here */
        nfc->data = tcam_get_size(np);
 
        niu_lock_parent(np, flags);
-       n_entries = nfc->rule_cnt;
        for (cnt = 0, i = 0; i < nfc->data; i++) {
                idx = tcam_get_index(np, i);
                tp = &parent->tcam[idx];
                if (!tp->valid)
                        continue;
+               if (cnt == nfc->rule_cnt) {
+                       ret = -EMSGSIZE;
+                       break;
+               }
                rule_locs[cnt] = i;
                cnt++;
        }
        niu_unlock_parent(np, flags);
 
-       if (n_entries != cnt) {
-               /* print warning, this should not happen */
-               netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
-                           np->parent->index, __func__, n_entries, cnt);
-       }
-
-       return 0;
+       return ret;
 }
 
 static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
index bbb7951b9c4c34bb3992deed9c7a8e84ff52f68e..ea0461eb2dbe4314c223ab2ad47f17ea9b90e740 100644 (file)
@@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev)
        if (!netif_running(dev))
                return 0;
 
-       spin_lock(&priv->lock);
-
        if (priv->shutdown) {
                /* Re-open the interface and re-init the MAC/DMA
-                  and the rings. */
+                  and the rings (i.e. when resuming from hibernation) */
                stmmac_open(dev);
-               goto out_resume;
+               return 0;
        }
 
+       spin_lock(&priv->lock);
+
        /* Power Down bit, into the PM register, is cleared
         * automatically as soon as a magic packet or a Wake-up frame
         * is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev)
 
        netif_start_queue(dev);
 
-out_resume:
        spin_unlock(&priv->lock);
        return 0;
 }
index 8ed30fa35d0a5d789121eb3042c4b87a5e53a7f2..b2bcf99e6f087ab1dfca2aef092233eea9aa5fe6 100644 (file)
@@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
        .ndo_get_stats = &ipheth_stats,
 };
 
-static struct device_type ipheth_type = {
-       .name   = "wwan",
-};
-
 static int ipheth_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
 {
@@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
 
        netdev->netdev_ops = &ipheth_netdev_ops;
        netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
-       strcpy(netdev->name, "wwan%d");
+       strcpy(netdev->name, "eth%d");
 
        dev = netdev_priv(netdev);
        dev->udev = udev;
@@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
 
        SET_NETDEV_DEV(netdev, &intf->dev);
        SET_ETHTOOL_OPS(netdev, &ops);
-       SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
 
        retval = register_netdev(netdev);
        if (retval) {
index fd69095ef6e33d61698556abac79a4e84429c8fe..f53412368ce1e1745a7796a8cc8181b16027e797 100644 (file)
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
        ret = register_netdev(dev);
        if (ret < 0)
index a9352b2c7ac430d4e4aafac3d65a1b46005ea505..b7e755f4178ad885332ccaaeeb5eda492e6dfcfd 100644 (file)
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
-{
-       end_cpu_work();
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
        int err;
@@ -158,7 +148,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       start_cpu_work();
+       mutex_lock(&buffer_mutex);
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
        if (err)
                goto out4;
 
+       start_cpu_work();
+
 out:
+       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
-       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -190,11 +182,20 @@ out1:
 
 void sync_stop(void)
 {
+       /* flush buffers */
+       mutex_lock(&buffer_mutex);
+       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       end_sync();
+       mutex_unlock(&buffer_mutex);
+       flush_scheduled_work();
+
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+
        free_cpumask_var(marked_cpus);
 }
 
index 219f79e2210a3fcd561b94456c4960a0a1fbacd9..f179ac2ea80149423034d66a90b1e81251c5069b 100644 (file)
@@ -120,8 +120,6 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
-
-       flush_scheduled_work();
 }
 
 /*
index 72b2bcc2c22413b1a63e465e355ea65084ec7b8e..d4fb82d85e9b36ab61e1626236cb98bf76364ea2 100644 (file)
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                enable_irq_wake(IRQ_RTC);
                bfin_rtc_sync_pending(&pdev->dev);
        } else
-               bfin_rtc_int_clear(-1);
+               bfin_rtc_int_clear(0);
 
        return 0;
 }
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
 {
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(IRQ_RTC);
-       else
-               bfin_write_RTC_ISTAT(-1);
+
+       /*
+        * Since only some of the RTC bits are maintained externally in the
+        * Vbat domain, we need to wait for the RTC MMRs to be synced into
+        * the core after waking up.  This happens once per RTC 1Hz tick.  Once that
+        * has happened, we can go ahead and re-enable the important write
+        * complete interrupt event.
+        */
+       while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+               continue;
+       bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
 
        return 0;
 }
index 66377f3e28b851eaa908c6057a9646a639e9c229..d60557cae8ef4fdadadb10b343125f174cb7508d 100644 (file)
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->time.tm_isdst = -1;
        t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
        t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
-       return rtc_valid_tm(t);
+       return 0;
 }
 
 static struct rtc_class_ops m41t80_rtc_ops = {
index 6c418fe7f288ae2deaa9f44080a749a9eaafad88..b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f 100644 (file)
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        if (request_irq(adev->irq[0], pl031_interrupt,
-                       IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+                       IRQF_DISABLED, "rtc-pl031", ldata)) {
                ret = -EIO;
                goto out_no_irq;
        }
index b7de02525ec901ebbee390b2798043eb1938f4bd..85cf607fc78f62d243ae41b20f2df018710be2e0 100644 (file)
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
        if (!blkdat->request_queue)
                return -ENOMEM;
 
-       elevator_exit(blkdat->request_queue->elevator);
-       rc = elevator_init(blkdat->request_queue, "noop");
+       rc = elevator_change(blkdat->request_queue, "noop");
        if (rc)
                goto cleanup_queue;
 
index 7d4d2275573c138d9e1c5223e41928defd4649aa..7f11f3e48e120ec82f1d7e26a0f1055fca771f97 100644 (file)
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
                           enum iscsi_host_param param, char *buf)
 {
        struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
-       int len = 0;
-       int status;
+       int status = 0;
 
        SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
        switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        default:
                return iscsi_host_get_param(shost, param, buf);
        }
-       return len;
+       return status;
 }
 
 int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
index 26350e470bccf71ea47b8dd99f387a31aff21670..877324fc594c28b606db82a12ab9b3559615ecbd 100644 (file)
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        memset(req, 0, sizeof(*req));
        wrb->tag0 |= tag;
 
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
                           sizeof(*req));
index cd05e049d5f6ad7164c6485ee336db00933c63b0..d0c82340f0e25198d0c56101c76d0d1a00b17254 100644 (file)
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
 {
        struct scsi_sense_hdr sshdr;
 
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
        scsi_show_sense_hdr(&sshdr);
        scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
 }
 EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
 
 void scsi_print_result(struct scsi_cmnd *cmd)
 {
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_show_result(cmd->result);
 }
 EXPORT_SYMBOL(scsi_print_result);
index 4f5551b5fe53d29a1a0473d3d5d799dabb46e0d5..c5d0606ad0974edab8c0326f4fadff905413c171 100644 (file)
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
index fda4de3440c4640f6754a1a66d143aeb5da8936c..e88bbdde49c5066b4bfb9fa2d8b5822115777cd8 100644 (file)
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
+       WARN_ON(bio->bi_rw & REQ_WRITE);
        or->in.bio = bio;
        or->in.total_bytes = len;
 }
index 420238cc794eb7dd69dbe132bdf6be29752ed644..114bc5a81171993ac91c27ba600cee72adb9aed9 100644 (file)
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
        qla24xx_disable_vp(vha);
 
+       vha->flags.delete_progress = 1;
+
        fc_remove_host(vha->host);
 
        scsi_remove_host(vha->host);
 
-       qla2x00_free_fcports(vha);
+       if (vha->timer_active) {
+               qla2x00_vp_stop_timer(vha);
+               DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+               " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+       }
 
        qla24xx_deallocate_vp_id(vha);
 
+       /* No pending activities shall be there on the vha now */
+       DEBUG(msleep(random32()%10));  /* Just to see if something falls on
+                                       * the net we have placed below */
+
+       BUG_ON(atomic_read(&vha->vref_count));
+
+       qla2x00_free_fcports(vha);
+
        mutex_lock(&ha->vport_lock);
        ha->cur_vport_count--;
        clear_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);
 
-       if (vha->timer_active) {
-               qla2x00_vp_stop_timer(vha);
-               DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
-                   "has stopped\n",
-                   vha->host_no, vha->vp_idx, vha));
-        }
-
        if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
                if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
                        qla_printk(KERN_WARNING, ha,
index 6cfc28a25eb3c41cca4f24b91ba575fd8d12c48d..b74e6b5743dc2931cfefa730beb351f1fa60ea57 100644 (file)
@@ -29,8 +29,6 @@
 /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
 /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
 
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
 /*
 * Macros use for debugging the driver.
 */
index 3a432ea0c7a3548844dd4013fd06798396135a70..d2a4e1530708add3659525948aec20ce13629f91 100644 (file)
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
 #define MBX_UPDATE_FLASH_ACTIVE        3
 
        struct mutex vport_lock;        /* Virtual port synchronization */
+       spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
                uint32_t        management_server_logged_in :1;
                uint32_t        process_response_queue  :1;
                uint32_t        difdix_supported:1;
+               uint32_t        delete_progress:1;
        } flags;
 
        atomic_t        loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
        struct req_que *req;
        int             fw_heartbeat_counter;
        int             seconds_since_last_heartbeat;
+
+       atomic_t        vref_count;
 } scsi_qla_host_t;
 
 /*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
         test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
         atomic_read(&ha->loop_state) == LOOP_DOWN)
 
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {               \
+       atomic_inc(&__vha->vref_count);                      \
+       mb();                                                \
+       if (__vha->flags.delete_progress) {                  \
+               atomic_dec(&__vha->vref_count);              \
+               __bail = 1;                                  \
+       } else {                                             \
+               __bail = 0;                                  \
+       }                                                    \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {                   \
+       atomic_dec(&__vha->vref_count);                      \
+} while (0)
+
+
 #define qla_printk(level, ha, format, arg...) \
        dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
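
The QLA_VHA_MARK_BUSY()/QLA_VHA_MARK_NOT_BUSY() pair added above implements a reference count plus deletion-flag protocol: a caller bumps vref_count, re-checks delete_progress, and backs out if teardown has already started, so the delete path can simply wait for the count to drain. A hedged userspace sketch of the same protocol, with C11 atomics standing in for the kernel's atomic_t and barrier (names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vport {
        atomic_int  vref_count;       /* users currently inside the vport */
        atomic_bool delete_progress;  /* set once teardown has begun */
    };

    /* Returns true if the caller may proceed; false means "bail out". */
    static bool vport_mark_busy(struct vport *v)
    {
        atomic_fetch_add(&v->vref_count, 1);
        if (atomic_load(&v->delete_progress)) {
            atomic_fetch_sub(&v->vref_count, 1);
            return false;             /* delete already in progress */
        }
        return true;
    }

    static void vport_mark_not_busy(struct vport *v)
    {
        atomic_fetch_sub(&v->vref_count, 1);
    }

    int main(void)
    {
        struct vport v = { 0 };

        if (vport_mark_busy(&v)) {
            /* ... issue the command or walk the list ... */
            vport_mark_not_busy(&v);
        }

        atomic_store(&v.delete_progress, true);
        printf("busy allowed after delete starts? %d\n", vport_mark_busy(&v));
        return 0;
    }

The qla2x00_get_ctx_sp()/qla2x00_ctx_sp_free() changes further down use this bail-out shape directly.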
 
index d863ed2619b56853f7d29250dfd17b876fb0ee5c..9c383baebe279d0c27bb3027dc1bd337ab52603e 100644 (file)
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
 {
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *iocb = ctx->u.iocb_cmd;
+       struct scsi_qla_host *vha = sp->fcport->vha;
 
        del_timer_sync(&iocb->timer);
        kfree(iocb);
        kfree(ctx);
        mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+       QLA_VHA_MARK_NOT_BUSY(vha);
 }
 
 inline srb_t *
 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
     unsigned long tmo)
 {
-       srb_t *sp;
+       srb_t *sp = NULL;
        struct qla_hw_data *ha = vha->hw;
        struct srb_ctx *ctx;
        struct srb_iocb *iocb;
+       uint8_t bail;
+
+       QLA_VHA_MARK_BUSY(vha, bail);
+       if (bail)
+               return NULL;
 
        sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
        if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
        iocb->timer.function = qla2x00_ctx_sp_timeout;
        add_timer(&iocb->timer);
 done:
+       if (!sp)
+               QLA_VHA_MARK_NOT_BUSY(vha);
        return sp;
 }
 
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                qla2x00_init_response_q_entries(rsp);
        }
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Clear RSCN queue. */
        list_for_each_entry(vp, &ha->vp_list, list) {
                vp->rscn_in_ptr = 0;
                vp->rscn_out_ptr = 0;
        }
+
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        ha->isp_ops->config_rings(vha);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                /* Bypass virtual ports of the same host. */
                found = 0;
                if (ha->num_vhosts) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
                        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                                if (new_fcport->d_id.b24 == vp->d_id.b24) {
                                        found = 1;
                                        break;
                                }
                        }
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        if (found)
                                continue;
                }
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
        struct scsi_qla_host *tvp;
+       unsigned long flags = 0;
 
        rval = QLA_SUCCESS;
 
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
                /* Check for loop ID being already in use. */
                found = 0;
                fcport = NULL;
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
                list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                        list_for_each_entry(fcport, &vp->vp_fcports, list) {
                                if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
                        if (found)
                                break;
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
 
                /* If not in use then it is free to use. */
                if (!found) {
@@ -3791,14 +3814,27 @@ void
 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
        fc_port_t *fcport;
-       struct scsi_qla_host *tvp, *vha;
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha = base_vha->hw;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Go with deferred removal of rport references. */
-       list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
-               list_for_each_entry(fcport, &vha->vp_fcports, list)
+       list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+               atomic_inc(&vha->vref_count);
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (fcport && fcport->drport &&
-                           atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+                           atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_rport_del(fcport);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                       }
+               }
+               atomic_dec(&vha->vref_count);
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 void
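
This traversal now pins each vport (atomic_inc of vref_count) before dropping vport_slock to call qla2x00_rport_del(), which may sleep, then re-takes the lock before unpinning and advancing. A rough userspace sketch of that pin / unlock / call / relock / unpin shape, with a pthread mutex standing in for the irq-safe spinlock (all names illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct vport {
        struct vport *next;
        atomic_int    vref_count;
        int           id;
    };

    static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct vport *vp_list;

    /* Stand-in for the per-vport work that may sleep, so no lock is held. */
    static void slow_per_vport_work(struct vport *v)
    {
        printf("working on vport %d\n", v->id);
    }

    static void walk_vports(void)
    {
        pthread_mutex_lock(&vport_lock);
        for (struct vport *v = vp_list; v; v = v->next) {
            atomic_fetch_add(&v->vref_count, 1);    /* pin current entry */
            pthread_mutex_unlock(&vport_lock);

            slow_per_vport_work(v);                 /* lock dropped here */

            pthread_mutex_lock(&vport_lock);
            atomic_fetch_sub(&v->vref_count, 1);    /* unpin */
        }
        pthread_mutex_unlock(&vport_lock);
    }

    int main(void)
    {
        struct vport a = { .id = 1 }, b = { .next = &a, .id = 2 };
        vp_list = &b;
        walk_vports();
        return 0;
    }

The pin is what allows the list_for_each_entry_safe() variants to go away: a vport cannot be unlinked and freed while its vref_count is raised, because the delete path drains the count first.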
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
-       struct scsi_qla_host *tvp;
+       unsigned long flags;
 
        vha->flags.online = 0;
        ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
-               list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        qla2x00_mark_all_devices_lost(vp, 0);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
        uint8_t        status = 0;
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
        struct req_que *req = ha->req_q_map[0];
+       unsigned long flags;
 
        if (vha->flags.online) {
                qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                DEBUG(printk(KERN_INFO
                                "qla2x00_abort_isp(%ld): succeeded.\n",
                                vha->host_no));
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       if (vp->vp_idx)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp->vp_idx) {
+                               atomic_inc(&vp->vref_count);
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_vp_abort_isp(vp);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                               atomic_dec(&vp->vref_count);
+                       }
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        } else {
                qla_printk(KERN_INFO, ha,
                        "qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
        struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
+       unsigned long flags;
 
        status = qla2x00_init_rings(vha);
        if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                DEBUG(printk(KERN_INFO
                        "qla82xx_restart_isp(%ld): succeeded.\n",
                        vha->host_no));
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       if (vp->vp_idx)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp->vp_idx) {
+                               atomic_inc(&vp->vref_count);
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_vp_abort_isp(vp);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                               atomic_dec(&vp->vref_count);
+                       }
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        } else {
                qla_printk(KERN_INFO, ha,
                        "qla82xx_restart_isp: **** FAILED ****\n");
index 6982ba70e12af12235d8d6502c78f81c06e732aa..28f65be19dad9878f19981a18ebc60ef13bed356 100644 (file)
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                cp->result = DID_ERROR << 16;
                                break;
                        }
-               } else if (!lscsi_status) {
+               } else {
                        DEBUG2(qla_printk(KERN_INFO, ha,
                            "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
                            "of 0x%x bytes).\n", vha->host_no, cp->device->id,
                            cp->device->lun, resid, scsi_bufflen(cp)));
 
-                       cp->result = DID_ERROR << 16;
-                       break;
+                       cp->result = DID_ERROR << 16 | lscsi_status;
+                       goto check_scsi_status;
                }
 
                cp->result = DID_OK << 16 | lscsi_status;
                logit = 0;
 
+check_scsi_status:
                /*
                 * Check to see if SCSI Status is non zero. If so report SCSI
                 * Status.
index 6009b0c69488144bf21715a2a8b03192fbac56ef..a595ec8264f8d7f73823a11d0616591a7957bd63 100644 (file)
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
-       scsi_qla_host_t *tvp;
+       unsigned long   flags;
 
        if (rptid_entry->entry_status != 0)
                return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        return;
                }
 
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list)
                        if (vp_idx == vp->vp_idx)
                                break;
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                if (!vp)
                        return;
 
index 987c5b0ca78ea22d67d074ee7d170ef1f14b52a4..2b69392a71a1fac76b50ab389a0c3e1054dce59f 100644 (file)
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 {
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
 
        /* Find an empty slot and assign an vp_id */
        mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;
+
+       spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        mutex_unlock(&ha->vport_lock);
        return vp_id;
 }
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 {
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
 
        mutex_lock(&ha->vport_lock);
+       /*
+        * Wait for all pending activities to finish before removing the vport
+        * from the list.
+        * The lock must be held for safe removal from the list (it ensures
+        * that no active vp_list traversal sees the vport while it is being
+        * removed from the queue).
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       while (atomic_read(&vha->vref_count)) {
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+               msleep(500);
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+       }
+       list_del(&vha->list);
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);
-       list_del(&vha->list);
+
        mutex_unlock(&ha->vport_lock);
 }
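
On the teardown side, qla24xx_deallocate_vp_id() above now polls until vref_count reaches zero (sleeping 500 ms between checks, with vport_slock dropped while sleeping) before unlinking the vport, so every traverser that pinned it has finished. A minimal self-contained sketch of that drain-then-unlink step (illustrative names, pthread mutex in place of the spinlock):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <unistd.h>

    struct vport { atomic_int vref_count; };

    static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Wait for every pinned user to leave, then the entry may be unlinked. */
    static void vport_drain_then_unlink(struct vport *v)
    {
        pthread_mutex_lock(&vport_lock);
        while (atomic_load(&v->vref_count) != 0) {
            pthread_mutex_unlock(&vport_lock);
            usleep(500 * 1000);             /* 500 ms poll, as the driver does */
            pthread_mutex_lock(&vport_lock);
        }
        /* safe to remove v from the list here; no traverser holds a pin */
        pthread_mutex_unlock(&vport_lock);
    }

    int main(void)
    {
        struct vport v = { 0 };
        vport_drain_then_unlink(&v);
        return 0;
    }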
 
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 {
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
-               if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+               if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
+               }
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
 }
 
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 static void
 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 {
+       /*
+        * !!! NOTE !!!
+        * If this function is called from a context other than vport create,
+        * disable, or delete, make sure the call is synchronized with the
+        * vport delete thread.
+        */
        fc_port_t *fcport;
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
                    "loop_id=0x%04x :%x\n",
                    vha->host_no, fcport->loop_id, fcport->vp_idx));
 
-               atomic_set(&fcport->state, FCS_DEVICE_DEAD);
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                atomic_set(&fcport->state, FCS_UNCONFIGURED);
        }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 void
 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-       scsi_qla_host_t *vha, *tvha;
+       scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
+       unsigned long flags;
 
-       list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
+                       atomic_inc(&vha->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vha->vref_count);
                }
                i++;
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
-       struct scsi_qla_host *tvp;
+       unsigned long flags = 0;
 
        if (vha->vp_idx)
                return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
        if (!(ha->current_topology & ISP_CFG_F))
                return;
 
-       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-               if (vp->vp_idx)
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               if (vp->vp_idx) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        ret = qla2x00_do_dpc_vp(vp);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
index 915b77a6e19390026134ff697cceaa89e9dd8c99..0a71cc71eab23922e9c6aee451962f60a1e3cfb3 100644 (file)
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
 sufficient_dsds:
                req_cnt = 1;
 
+               if (req->cnt < (req_cnt + 2)) {
+                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+                               &reg->req_q_out[0]);
+                       if (req->ring_index < cnt)
+                               req->cnt = cnt - req->ring_index;
+                       else
+                               req->cnt = req->length -
+                                       (req->ring_index - cnt);
+               }
+
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+
                ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
                if (!sp->ctx) {
                        DEBUG(printk(KERN_INFO
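
The added guard recomputes how many request-ring entries are free from the hardware's out pointer, covering both the wrapped and non-wrapped cases, and bails out if fewer than req_cnt + 2 slots remain. The arithmetic is the standard circular-buffer free-space formula; a small standalone version (field names are illustrative, not the driver's structures):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Free entries in a ring of 'length' slots, given the driver's next-in
     * index and the hardware's out index (sketch of the same math). */
    static uint16_t ring_free_entries(uint16_t length, uint16_t ring_index,
                                      uint16_t out)
    {
        if (ring_index < out)
            return out - ring_index;            /* no wrap */
        return length - (ring_index - out);     /* wrapped */
    }

    int main(void)
    {
        assert(ring_free_entries(128, 10, 50) == 40);   /* out ahead of in */
        assert(ring_free_entries(128, 120, 5) == 13);   /* in ahead of out */
        printf("ok\n");
        return 0;
    }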
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        }
                        qla2xxx_wake_dpc(vha);
+                       ha->flags.fw_hung = 1;
                        if (ha->flags.mbox_busy) {
-                               ha->flags.fw_hung = 1;
                                ha->flags.mbox_int = 1;
                                DEBUG2(qla_printk(KERN_ERR, ha,
-                                   "Due to fw hung, doing premature "
-                                   "completion of mbx command\n"));
-                               complete(&ha->mbx_intr_comp);
+                                       "Due to fw hung, doing premature "
+                                       "completion of mbx command\n"));
+                               if (test_bit(MBX_INTR_WAIT,
+                                       &ha->mbx_cmd_flags))
+                                       complete(&ha->mbx_intr_comp);
                        }
                }
-       }
+       } else
+               vha->seconds_since_last_heartbeat = 0;
        vha->fw_heartbeat_counter = fw_heartbeat_counter;
 }
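
The new else branch appears to reset seconds_since_last_heartbeat whenever the firmware counter has advanced, so staleness only accumulates across consecutive samples with no progress (and fw_hung is now set before, not inside, the mbox_busy check). A generic sketch of that no-progress detector (names and threshold are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct hb_state {
        unsigned int last_counter;  /* firmware counter at the previous poll */
        unsigned int stale_polls;   /* consecutive polls with no progress */
    };

    /* Returns true once no progress has been seen for 'limit' polls in a row. */
    static bool heartbeat_poll(struct hb_state *s, unsigned int counter,
                               unsigned int limit)
    {
        if (counter == s->last_counter)
            s->stale_polls++;
        else
            s->stale_polls = 0;     /* progress seen: restart the count */
        s->last_counter = counter;
        return s->stale_polls >= limit;
    }

    int main(void)
    {
        struct hb_state s = { 0 };
        unsigned int samples[] = { 1, 2, 2, 2, 2 };

        for (unsigned int i = 0; i < 5; i++)
            printf("poll %u: hung=%d\n", i, heartbeat_poll(&s, samples[i], 3));
        return 0;
    }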
 
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                "%s(): Adapter reset needed!\n", __func__);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        qla2xxx_wake_dpc(vha);
+                       ha->flags.fw_hung = 1;
                        if (ha->flags.mbox_busy) {
-                               ha->flags.fw_hung = 1;
                                ha->flags.mbox_int = 1;
                                DEBUG2(qla_printk(KERN_ERR, ha,
-                                   "Need reset, doing premature "
-                                   "completion of mbx command\n"));
-                               complete(&ha->mbx_intr_comp);
+                                       "Need reset, doing premature "
+                                       "completion of mbx command\n"));
+                               if (test_bit(MBX_INTR_WAIT,
+                                       &ha->mbx_cmd_flags))
+                                       complete(&ha->mbx_intr_comp);
                        }
                } else {
                        qla82xx_check_fw_alive(vha);
index 8c80b49ac1c44d875f7c033fbb1c8962723307cb..1e4bff695254b4fb6a4adbc1008ba4a999352088 100644 (file)
--- a/