Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Sep 2009 16:16:22 +0000 (09:16 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Sep 2009 16:16:22 +0000 (09:16 -0700)
* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Improve the "Early log buffer exceeded" error message
  kmemleak: fix sparse warning for static declarations
  kmemleak: fix sparse warning over overshadowed flags
  kmemleak: move common painting code together
  kmemleak: add clear command support
  kmemleak: use bool for true/false questions
  kmemleak: Do no create the clean-up thread during kmemleak_disable()
  kmemleak: Scan all thread stacks
  kmemleak: Don't scan uninitialized memory when kmemcheck is enabled
  kmemleak: Ignore the aperture memory hole on x86_64
  kmemleak: Printing of the objects hex dump
  kmemleak: Do not report alloc_bootmem blocks as leaks
  kmemleak: Save the stack trace for early allocations
  kmemleak: Mark the early log buffer as __initdata
  kmemleak: Dump object information on request
  kmemleak: Allow rescheduling during an object scanning
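
A minimal userspace sketch (not part of the merge itself) of driving the kmemleak
debugfs interface that the commits above extend, including the new "clear" command;
it assumes debugfs is mounted at /sys/kernel/debug, CONFIG_DEBUG_KMEMLEAK is enabled,
and the program runs as root:

	#include <stdio.h>

	/* Write a command ("scan", "clear", "dump=<addr>", ...) to kmemleak. */
	static int kmemleak_cmd(const char *cmd)
	{
		FILE *f = fopen("/sys/kernel/debug/kmemleak", "w");

		if (!f)
			return -1;
		fputs(cmd, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		kmemleak_cmd("scan");	/* trigger an immediate memory scan */
		kmemleak_cmd("clear");	/* mark current results as handled (new command) */
		return 0;
	}

Reading /sys/kernel/debug/kmemleak afterwards lists any unreferenced objects found.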

308 files changed:
Documentation/filesystems/9p.txt
Documentation/keys.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/thread_info.h
arch/alpha/kernel/signal.c
arch/arm/include/asm/thread_info.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/signal.c
arch/avr32/include/asm/thread_info.h
arch/avr32/kernel/entry-avr32b.S
arch/avr32/kernel/signal.c
arch/cris/kernel/ptrace.c
arch/frv/kernel/signal.c
arch/h8300/include/asm/thread_info.h
arch/h8300/kernel/signal.c
arch/ia64/kernel/dma-mapping.c
arch/ia64/kernel/process.c
arch/ia64/lib/ip_fast_csum.S
arch/m32r/include/asm/thread_info.h
arch/m32r/kernel/signal.c
arch/mips/include/asm/thread_info.h
arch/mips/kernel/signal.c
arch/mn10300/kernel/signal.c
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/signal.c
arch/parisc/kernel/traps.c
arch/powerpc/kernel/power7-pmu.c
arch/powerpc/sysdev/xilinx_intc.c
arch/s390/kernel/signal.c
arch/sh/kernel/signal_32.c
arch/sh/kernel/signal_64.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/nmi.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/prom/misc_64.c
arch/sparc/prom/printf.c
arch/x86/kernel/apic/probe_64.c
arch/x86/kernel/signal.c
arch/x86/xen/enlighten.c
block/blk-sysfs.c
crypto/algapi.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/video.c
drivers/ata/ata_piix.c
drivers/block/aoe/aoe.h
drivers/block/aoe/aoeblk.c
drivers/block/aoe/aoedev.c
drivers/char/agp/intel-agp.c
drivers/char/n_tty.c
drivers/char/pty.c
drivers/char/tpm/tpm_tis.c
drivers/cpufreq/cpufreq.c
drivers/firewire/core-iso.c
drivers/firewire/ohci.c
drivers/firewire/sbp2.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/ide/ide-cs.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/mad_priv.h
drivers/infiniband/core/multicast.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/smi.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/amso1100/c2.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_cm.h
drivers/infiniband/hw/cxgb3/iwch_mem.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_reqs.c
drivers/infiniband/hw/ehca/ehca_sqp.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/hw/mthca/mthca_config_reg.h
drivers/infiniband/hw/mthca/mthca_dev.h
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/mthca/mthca_main.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_provider.h
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/hw/mthca/mthca_reset.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_cm.h
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_utils.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/nes/nes_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/input/keyboard/atkbd.c
drivers/input/serio/i8042-x86ia64io.h
drivers/md/dm-exception-store.c
drivers/md/dm-exception-store.h
drivers/md/dm-log-userspace-base.c
drivers/md/dm-log-userspace-transfer.c
drivers/md/dm-log-userspace-transfer.h
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/media/dvb/siano/Kconfig
drivers/media/dvb/siano/Makefile
drivers/media/dvb/siano/smsdvb.c
drivers/media/dvb/siano/smssdio.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/em28xx/em28xx.h
drivers/media/video/gspca/Kconfig
drivers/media/video/zr364xx.c
drivers/mtd/devices/m25p80.c
drivers/mtd/nftlcore.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb3/cxgb3_offload.h
drivers/net/gianfar.c
drivers/net/mlx4/cq.c
drivers/net/mlx4/eq.c
drivers/net/mlx4/icm.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mcg.c
drivers/net/mlx4/mlx4.h
drivers/net/mlx4/mr.c
drivers/net/mlx4/pd.c
drivers/net/mlx4/profile.c
drivers/net/mlx4/qp.c
drivers/net/mlx4/reset.c
drivers/net/mlx4/srq.c
drivers/net/tun.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/pci/iov.c
drivers/pci/pci.h
drivers/pci/setup-bus.c
drivers/pci/setup-res.c
drivers/platform/x86/toshiba_acpi.c
drivers/scsi/cxgb3i/cxgb3i_init.c
drivers/staging/comedi/comedi_fops.c
drivers/video/xen-fbfront.c
fs/9p/v9fs.c
fs/9p/v9fs.h
fs/9p/vfs_inode.c
fs/9p/vfs_super.c
fs/afs/file.c
fs/autofs4/expire.c
fs/binfmt_elf.c
fs/compat.c
fs/exec.c
fs/ext2/acl.c
fs/ext2/acl.h
fs/ext2/file.c
fs/ext2/namei.c
fs/ext3/acl.c
fs/ext3/acl.h
fs/ext3/file.c
fs/ext3/namei.c
fs/ext4/acl.c
fs/ext4/acl.h
fs/ext4/file.c
fs/ext4/namei.c
fs/jffs2/acl.c
fs/jffs2/acl.h
fs/jffs2/dir.c
fs/jffs2/file.c
fs/jffs2/symlink.c
fs/jffs2/wbuf.c
fs/jfs/acl.c
fs/jfs/file.c
fs/jfs/jfs_acl.h
fs/jfs/namei.c
fs/locks.c
fs/namei.c
fs/nfsd/auth.c
fs/nfsd/nfssvc.c
fs/nfsd/vfs.c
fs/nilfs2/btnode.c
fs/notify/inotify/inotify_fsnotify.c
fs/notify/inotify/inotify_user.c
fs/ocfs2/aops.c
fs/ocfs2/dcache.c
fs/open.c
fs/sysfs/dir.c
fs/sysfs/inode.c
fs/sysfs/symlink.c
fs/sysfs/sysfs.h
fs/xattr.c
fs/xfs/linux-2.6/xfs_ioctl32.c
fs/xfs/linux-2.6/xfs_iops.c
include/crypto/algapi.h
include/crypto/internal/skcipher.h
include/linux/binfmts.h
include/linux/cred.h
include/linux/device-mapper.h
include/linux/dm-log-userspace.h
include/linux/fs.h
include/linux/key.h
include/linux/keyctl.h
include/linux/lmb.h
include/linux/lsm_audit.h
include/linux/sched.h
include/linux/security.h
include/linux/shmem_fs.h
include/linux/workqueue.h
include/linux/xattr.h
include/net/pkt_sched.h
kernel/acct.c
kernel/cred.c
kernel/exit.c
kernel/fork.c
kernel/kmod.c
kernel/module.c
kernel/perf_counter.c
kernel/ptrace.c
kernel/sysctl.c
lib/Kconfig.debug
lib/is_single_threaded.c
lib/lmb.c
mm/nommu.c
mm/page_alloc.c
mm/percpu.c
mm/shmem.c
mm/shmem_acl.c
mm/slub.c
net/9p/client.c
net/9p/error.c
net/9p/trans_fd.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/core/dev.c
net/core/sock.c
net/ipv4/ip_output.c
net/ipv4/tcp_cong.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sunrpc/clnt.c
security/Makefile
security/capability.c
security/commoncap.c
security/integrity/ima/ima_main.c
security/keys/Makefile
security/keys/compat.c
security/keys/gc.c [new file with mode: 0644]
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/sysctl.c
security/lsm_audit.c
security/security.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/av_inherit.h
security/selinux/include/av_perm_to_string.h
security/selinux/include/av_permissions.h
security/selinux/include/avc.h
security/selinux/include/class_to_string.h
security/selinux/include/flask.h
security/selinux/include/netlabel.h
security/selinux/include/xfrm.h
security/selinux/netlabel.c
security/selinux/ss/services.c
security/selinux/xfrm.c
security/smack/smack.h
security/smack/smack_access.c
security/smack/smack_lsm.c
security/tomoyo/common.c
security/tomoyo/common.h
security/tomoyo/domain.c
security/tomoyo/tomoyo.c
security/tomoyo/tomoyo.h
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/oxygen_pcm.c

index bf8080640eba405d01aac5a101623e825b542d90..6208f55c44c352b3bcef39514fdd147684ac9064 100644 (file)
@@ -123,6 +123,9 @@ available from the same CVS repository.
 There are user and developer mailing lists available through the v9fs project
 on sourceforge (http://sourceforge.net/projects/v9fs).
 
+A stand-alone version of the module (which should build for any 2.6 kernel)
+is available via (http://github.com/ericvh/9p-sac/tree/master)
+
 News and other information is maintained on SWiK (http://swik.net/v9fs).
 
 Bug reports may be issued through the kernel.org bugzilla 
index b56aacc1fff864022dbdf67aac997d54f0d14f32..e4dbbdb1bd961e7a7a582c3dbb3ada1008218218 100644 (file)
@@ -26,7 +26,7 @@ This document has the following sections:
        - Notes on accessing payload contents
        - Defining a key type
        - Request-key callback service
-       - Key access filesystem
+       - Garbage collection
 
 
 ============
@@ -113,6 +113,9 @@ Each key has a number of attributes:
 
      (*) Dead. The key's type was unregistered, and so the key is now useless.
 
+Keys in the last three states are subject to garbage collection.  See the
+section on "Garbage collection".
+
 
 ====================
 KEY SERVICE OVERVIEW
@@ -754,6 +757,26 @@ The keyctl syscall functions are:
      successful.
 
 
+ (*) Install the calling process's session keyring on its parent.
+
+       long keyctl(KEYCTL_SESSION_TO_PARENT);
+
+     This function attempts to install the calling process's session keyring
+     on to the calling process's parent, replacing the parent's current session
+     keyring.
+
+     The calling process must have the same ownership as its parent, the
+     keyring must have the same ownership as the calling process, the calling
+     process must have LINK permission on the keyring and the active LSM module
+     mustn't deny permission, otherwise error EPERM will be returned.
+
+     Error ENOMEM will be returned if there was insufficient memory to complete
+     the operation, otherwise 0 will be returned to indicate success.
+
+     The keyring will be replaced next time the parent process leaves the
+     kernel and resumes executing userspace.
+
+
 ===============
 KERNEL SERVICES
 ===============
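
A minimal userspace sketch (not part of this patch) of the KEYCTL_SESSION_TO_PARENT
operation documented above, issued as a raw syscall; KEYCTL_SESSION_TO_PARENT is
assumed to come from the <linux/keyctl.h> header updated by this merge:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/keyctl.h>

	int main(void)
	{
		/* Replace the parent's session keyring with ours; the swap takes
		 * effect when the parent next returns to userspace. */
		if (syscall(SYS_keyctl, KEYCTL_SESSION_TO_PARENT) < 0) {
			perror("keyctl(KEYCTL_SESSION_TO_PARENT)");
			return 1;
		}
		return 0;
	}
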
@@ -1231,3 +1254,17 @@ by executing:
 
 In this case, the program isn't required to actually attach the key to a ring;
 the rings are provided for reference.
+
+
+==================
+GARBAGE COLLECTION
+==================
+
+Dead keys (for which the type has been removed) will be automatically unlinked
+from those keyrings that point to them and deleted as soon as possible by a
+background garbage collector.
+
+Similarly, revoked and expired keys will be garbage collected, but only after a
+certain amount of time has passed.  This time is set as a number of seconds in:
+
+       /proc/sys/kernel/keys/gc_delay
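
A small sketch (not part of this patch) of tuning that delay from userspace,
equivalent to "echo 300 > /proc/sys/kernel/keys/gc_delay" and requiring root:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/keys/gc_delay", "w");

		if (!f) {
			perror("gc_delay");
			return 1;
		}
		fprintf(f, "300\n");	/* GC revoked/expired keys after 300 seconds */
		return fclose(f) ? 1 : 0;
	}
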
index 60299a9a7adbe05410f2e01d2a3facd7e3a4b81e..989ff1149390c783823cb5e671e5962574ffad9e 100644 (file)
@@ -439,7 +439,7 @@ F:  drivers/hwmon/ams/
 AMSO1100 RNIC DRIVER
 M:     Tom Tucker <tom@opengridcomputing.com>
 M:     Steve Wise <swise@opengridcomputing.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/hw/amso1100/
 
@@ -1494,7 +1494,7 @@ F:        drivers/net/cxgb3/
 
 CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
 M:     Steve Wise <swise@chelsio.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 W:     http://www.openfabrics.org
 S:     Supported
 F:     drivers/infiniband/hw/cxgb3/
@@ -1868,7 +1868,7 @@ F:        fs/efs/
 EHCA (IBM GX bus InfiniBand adapter) DRIVER
 M:     Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 M:     Christoph Raisch <raisch@de.ibm.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/ehca/
 
@@ -2239,8 +2239,7 @@ S:        Maintained
 F:     drivers/media/video/gspca/pac207.c
 
 GSPCA SN9C20X SUBDRIVER
-P:     Brian Johnson
-M:     brijohn@gmail.com
+M:     Brian Johnson <brijohn@gmail.com>
 L:     linux-media@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
@@ -2553,7 +2552,7 @@ INFINIBAND SUBSYSTEM
 M:     Roland Dreier <rolandd@cisco.com>
 M:     Sean Hefty <sean.hefty@intel.com>
 M:     Hal Rosenstock <hal.rosenstock@gmail.com>
-L:     general@lists.openfabrics.org (moderated for non-subscribers)
+L:     linux-rdma@vger.kernel.org
 W:     http://www.openib.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
 S:     Supported
@@ -2730,7 +2729,7 @@ F:        drivers/net/ipg.c
 
 IPATH DRIVER
 M:     Ralph Campbell <infinipath@qlogic.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 T:     git git://git.qlogic.com/ipath-linux-2.6
 S:     Supported
 F:     drivers/infiniband/hw/ipath/
@@ -3486,7 +3485,7 @@ F:        drivers/scsi/NCR_D700.*
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 M:     Faisal Latif <faisal.latif@intel.com>
 M:     Chien Tung <chien.tin.tung@intel.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 W:     http://www.neteffect.com
 S:     Supported
 F:     drivers/infiniband/hw/nes/
index 9c87e60d169c09f1f986a94609dfdc6ab7fe7c9c..60de4ef312547da0264a363c87d0165626a6db8c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 31
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
index 60c83abfde7027832e0e7609ac7e032accb66386..5076a8860b18ff4440ee9d9187e57d696406321d 100644 (file)
@@ -75,6 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS         7
 #define TIF_MEMDIE             8
 #define TIF_RESTORE_SIGMASK    9       /* restore signal mask in do_signal */
+#define TIF_NOTIFY_RESUME      10      /* callback before returning to user */
 #define TIF_FREEZE             16      /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -82,10 +83,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE            (1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return.  */
-#define _TIF_WORK_MASK         (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_WORK_MASK         (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+                                _TIF_NOTIFY_RESUME)
 
 /* Work to do on any return to userspace.  */
 #define _TIF_ALLWORK_MASK      (_TIF_WORK_MASK         \
index df65eaa84c4c41218a68121c3adce386437f6642..0932dbb1ef8eff444943645e15b0802b1d4cf5d5 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
 #include <linux/syscalls.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/sigcontext.h>
@@ -683,4 +684,11 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
 {
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs, sw, r0, r19);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index 73394e50cbca26e4198349de503fd18554f331d6..d3a39b1e6c0fc12cde0ccf4376a5d250b8232840 100644 (file)
@@ -130,11 +130,13 @@ extern void vfp_sync_state(struct thread_info *thread);
  *  TIF_SYSCALL_TRACE  - syscall trace active
  *  TIF_SIGPENDING     - signal pending
  *  TIF_NEED_RESCHED   - rescheduling necessary
+ *  TIF_NOTIFY_RESUME  - callback before returning to user
  *  TIF_USEDFPU                - FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
  */
 #define TIF_SIGPENDING         0
 #define TIF_NEED_RESCHED       1
+#define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
@@ -143,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
index 8c3de1a350b5dd85bdf06b79f1dab163cd305743..7813ab782fda6796bf455a39dcc723a6236707e5 100644 (file)
@@ -51,7 +51,7 @@ fast_work_pending:
 work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
-       tst     r1, #_TIF_SIGPENDING
+       tst     r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
index f6bc5d442782374bb99618702722dde5d37e1dc2..b76fe06d92e746b68855d42112071f69fff71c5a 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/uaccess.h>
+#include <linux/tracehook.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -707,4 +708,11 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
        if (thread_flags & _TIF_SIGPENDING)
                do_signal(&current->blocked, regs, syscall);
+
+       if (thread_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index fc42de5ca209ac5e9de0cfca9c1f35561f740be2..fd0c5d7e933701c31166deb5f4f7269c2adb8206 100644 (file)
@@ -84,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE             6
 #define TIF_RESTORE_SIGMASK    7       /* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP 8       /* CPU is entering sleep 0 mode */
+#define TIF_NOTIFY_RESUME      9       /* callback before returning to user */
 #define TIF_FREEZE             29
 #define TIF_DEBUG              30      /* debugging enabled */
 #define TIF_USERSPACE          31      /* true if FS sets userspace */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE            (1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE            (1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
@@ -103,13 +105,15 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK                         \
        ((1 << TIF_SIGPENDING)                  \
+        | _TIF_NOTIFY_RESUME                   \
         | (1 << TIF_NEED_RESCHED)              \
         | (1 << TIF_POLLING_NRFLAG)            \
         | (1 << TIF_BREAKPOINT)                \
         | (1 << TIF_RESTORE_SIGMASK))
 
 /* work to do on any return to userspace */
-#define _TIF_ALLWORK_MASK      (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE))
+#define _TIF_ALLWORK_MASK      (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE) | \
+                                _TIF_NOTIFY_RESUME)
 /* work to do on return from debug mode */
 #define _TIF_DBGWORK_MASK      (_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
 
index 009a80155d67d96b7da1ee34e60ec726abcfdb4b..169268c40ae2552df9430128e4ae6e6964249989 100644 (file)
@@ -281,7 +281,7 @@ syscall_exit_work:
        ld.w    r1, r0[TI_flags]
        rjmp    1b
 
-2:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+2:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    3f
        unmask_interrupts
index 27227561bad67a7ebea577acffa3796f5998b8ea..64f886fac2efef6ea05bd1c0703cfdf4ccb8d7d4 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
@@ -322,4 +323,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
 
        if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs, &current->blocked, syscall);
+
+       if (ti->flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index b326023baab28d3d8be5eee2acc472aa323256e6..48b0f3912632f98d48ae22e40945d057aa393269 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -36,4 +37,11 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(canrestart,regs);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index 4a7a62c6e7833ed91a9087b3762cb3d0ba24df5f..6b0a2b6fed6a9326ab8f4d28955cda0bada96b6a 100644 (file)
@@ -572,6 +572,8 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(__frame);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 
 } /* end do_notify_resume() */
index 8bbc8b0ee45db3ad2ddb97f6c6c9be4b5550315f..70e67e47d0205eef549c4529c3ac627ac9cd2e42 100644 (file)
@@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void)
                                           TIF_NEED_RESCHED */
 #define TIF_MEMDIE             4
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME      6       /* callback before returning to user */
 #define TIF_FREEZE             16      /* is freezing for suspend */
 
 /* as above, but as bit values */
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE            (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK         0x0000FFFE      /* work to do on interrupt/exception return */
index cf3472f7389b9467bdb483ac883e81d4414a0532..af842c369d24301e4b38861d29ffae2df0f863e7 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
@@ -552,4 +553,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 {
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs, NULL);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index 39a3cd0a417326be10680e9d9cb52458e400051f..f2c1600da097f00a6c6c388ded1a9e5523dd39d9 100644 (file)
@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops);
 
 static int __init dma_init(void)
 {
-       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+       return 0;
 }
 fs_initcall(dma_init);
 
index 5d7c0e5b9e76f150b20269cb9ba9e2497062209a..89969e9500456e3a17212b6f20ea47d6abbc6fb3 100644 (file)
@@ -192,6 +192,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(&scr->pt);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 
        /* copy user rbs to kernel rbs */
index 1f86aeb2c9485bb70054280b103fa8280bbad3dc..620d9dc5220f377c9cd2da899a81795054a27ad0 100644 (file)
@@ -96,20 +96,22 @@ END(ip_fast_csum)
 GLOBAL_ENTRY(csum_ipv6_magic)
        ld4     r20=[in0],4
        ld4     r21=[in1],4
-       dep     r15=in3,in2,32,16
+       zxt4    in2=in2
        ;;
        ld4     r22=[in0],4
        ld4     r23=[in1],4
-       mux1    r15=r15,@rev
+       dep     r15=in3,in2,32,16
        ;;
        ld4     r24=[in0],4
        ld4     r25=[in1],4
-       shr.u   r15=r15,16
+       mux1    r15=r15,@rev
        add     r16=r20,r21
        add     r17=r22,r23
+       zxt4    in4=in4
        ;;
        ld4     r26=[in0],4
        ld4     r27=[in1],4
+       shr.u   r15=r15,16
        add     r18=r24,r25
        add     r8=r16,r17
        ;;
index 07bb5bd00e2a0660dc3be6c0c0c4514da8ade81b..71578151a403e9353c007b849fd5e711336fc3fc 100644 (file)
@@ -149,6 +149,7 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_NEED_RESCHED       2       /* rescheduling necessary */
 #define TIF_SINGLESTEP         3       /* restore singlestep on return to user mode */
 #define TIF_IRET               4       /* return with iret */
+#define TIF_NOTIFY_RESUME      5       /* callback before returning to user */
 #define TIF_RESTORE_SIGMASK    8       /* restore signal mask in do_signal() */
 #define TIF_USEDFPU            16      /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG     17      /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -160,6 +161,7 @@ static inline unsigned int get_thread_fault_code(void)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP                (1<<TIF_SINGLESTEP)
 #define _TIF_IRET              (1<<TIF_IRET)
+#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU           (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
index 18124542a6ebb695444c9681996147b69ac5d6de..144b0f124fc72f08b20f93336f96da81327fe61c 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/stddef.h>
 #include <linux/personality.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -408,5 +409,12 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
        if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs,oldset);
 
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
+
        clear_thread_flag(TIF_IRET);
 }
index f9df720d2e40e215aa0cb21561f467d6d95a2b6b..01cc1630b66cc4cf7f8f7a0159245fd72e5bf893 100644 (file)
@@ -115,6 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_NEED_RESCHED       2       /* rescheduling necessary */
 #define TIF_SYSCALL_AUDIT      3       /* syscall auditing active */
 #define TIF_SECCOMP            4       /* secure computing */
+#define TIF_NOTIFY_RESUME      5       /* callback before returning to user */
 #define TIF_RESTORE_SIGMASK    9       /* restore signal mask in do_signal() */
 #define TIF_USEDFPU            16      /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG     17      /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -139,6 +140,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1<<TIF_SECCOMP)
+#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU           (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
index 830c5ef9932b389cbc4ad7f1410b750721a6b070..6254041b942f9a9b56295024a0fd1320fb2da6ae 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/compiler.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/tracehook.h>
 
 #include <asm/abi.h>
 #include <asm/asm.h>
@@ -700,4 +701,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        /* deal with pending signal delivery */
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index feb2f2e810db7c785ea5a7562ac77dfb79666a79..a21f43bc68e269cf412e6ae3b0c0293c0dcd96c0 100644 (file)
@@ -568,5 +568,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(__frame);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 }
index 4ce0edfbe9694dc4daaed2253a625e9d46a25c72..ac775a76bff71508a939a08fc983d9d4b8e272f5 100644 (file)
@@ -59,6 +59,7 @@ struct thread_info {
 #define TIF_MEMDIE             5
 #define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
 #define TIF_FREEZE             7       /* is freezing for suspend */
+#define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -67,8 +68,9 @@ struct thread_info {
 #define _TIF_32BIT             (1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_FREEZE            (1 << TIF_FREEZE)
+#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 
-#define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | \
+#define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
 
 #endif /* __KERNEL__ */
index e552e547cb93fd88050ee800d62f2f2667b3ba5d..8c4712b74dc13b626f449e699ee31aca15701315 100644 (file)
@@ -948,7 +948,7 @@ intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19
-       ldi     (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
+       ldi     (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */
 
index f82544225e8e8b43b8ca76664101a849d1e8a7f7..8eb3c63c407a43e5aaac3e96654ce391cd8764a5 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/stddef.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
+#include <linux/tracehook.h>
 #include <asm/ucontext.h>
 #include <asm/rt_sigframe.h>
 #include <asm/uaccess.h>
@@ -645,4 +646,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall)
        if (test_thread_flag(TIF_SIGPENDING) ||
            test_thread_flag(TIF_RESTORE_SIGMASK))
                do_signal(regs, in_syscall);
+
+       if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
 }
index 528f0ff9b2738314ab61871379b0d8c980c4b530..8b58bf0b7d5aa47fd6f54317c74a0528e253b86c 100644 (file)
@@ -532,7 +532,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
-               regs->iasq[0] = regs->iasq[0] = regs->sr[7];
+               regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }
index 388cf57ad827b8d187c49dc34eb0ec8deaf48db5..018d094d92f91d2af529f0eff119f3b3f27983ef 100644 (file)
@@ -317,7 +317,7 @@ static int power7_generic_events[] = {
  */
 static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
-               [C(OP_READ)] = {        0x400f0,        0xc880  },
+               [C(OP_READ)] = {        0xc880,         0x400f0 },
                [C(OP_WRITE)] = {       0,              0x300f0 },
                [C(OP_PREFETCH)] = {    0xd8b8,         0       },
        },
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
                [C(OP_PREFETCH)] = {    0x408a,         0       },
        },
        [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
-               [C(OP_READ)] = {        0x6080,         0x6084  },
-               [C(OP_WRITE)] = {       0x6082,         0x6086  },
+               [C(OP_READ)] = {        0x16080,        0x26080 },
+               [C(OP_WRITE)] = {       0x16082,        0x26082 },
                [C(OP_PREFETCH)] = {    0,              0       },
        },
        [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
index 3ee1fd37bbfc3b819107a8dd4d9b2496f59ecc84..40edad520770e335c1afc7a7af0463f159972ef4 100644 (file)
@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
                generic_handle_irq(cascade_irq);
 
        /* Let xilinx_intc end the interrupt */
-       desc->chip->ack(irq);
        desc->chip->unmask(irq);
 }
 
index 062bd64e65fabe1f09641a4c3e0290ae4ff013ee..6b4fef877f9d0ccf0fd28ea654b9553ab3b70df9 100644 (file)
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs)
 {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
+       if (current->replacement_session_keyring)
+               key_replace_session_keyring();
 }
index b5afbec1db59ce6fb2d74cd0cd9e0fa51b879d0f..04a21883f32730bf18013031fe342e27c50b6b99 100644 (file)
@@ -640,5 +640,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 }
index 0663a0ee6021f4ab31c2bb32fd7a24fa82cd9a6a..9e5c9b1d7e9872fe3591e8520e8b7383e96e7962 100644 (file)
@@ -772,5 +772,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 }
index f0ee79055409d34f02947e3ca6915f0e71fe9dc1..8daab33fc17d1808a0abd55bce2d4ac98af0a5c7 100644 (file)
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;
index 2c0cc72d295b079cfab695197eff38fcbd54d586..b75bf502cd424e7305959c2ca92bffd4cc88bcf4 100644 (file)
@@ -103,7 +103,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
        }
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                local_inc(&__get_cpu_var(alert_counter));
-               if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+               if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                regs, panic_on_timeout);
        } else {
index 181d069a2d44bdeb160125053150b894e550598b..7ce1a1005b1da4c3b13f8b87927d060376279f2a 100644 (file)
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 }
 
index ec82d76dc6f2cc245ca54203dd38c35fee192ed2..647afbda7ae1f170896675dcc4603dfe5c58ec0b 100644 (file)
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 }
+
index eedffb4fec2d64132402cd3007897275962158a4..39fc6af21b7c55ddc0e752c80c745ed6c018820a 100644 (file)
@@ -88,7 +88,7 @@ void prom_cmdline(void)
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
-void prom_halt(void)
+void notrace prom_halt(void)
 {
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
index 660943ee4c2ac7e431822dc1f99cf9e8bac294e7..ca869266b9f3d0106e60d7b8c28f5cdcf614826b 100644 (file)
  */
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
 static char ppbuf[1024];
 
-void
-prom_write(const char *buf, unsigned int n)
+void notrace prom_write(const char *buf, unsigned int n)
 {
        char ch;
 
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
        }
 }
 
-void
-prom_printf(const char *fmt, ...)
+void notrace prom_printf(const char *fmt, ...)
 {
        va_list args;
        int i;
index bc3e880f9b82e76902b305d10920b70b05c440ac..fcec2f1d34a18ab37c68bbc14865a18174c21488 100644 (file)
@@ -44,6 +44,11 @@ static struct apic *apic_probe[] __initdata = {
        NULL,
 };
 
+static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+       return hard_smp_processor_id() >> index_msb;
+}
+
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
@@ -69,6 +74,11 @@ void __init default_setup_apic_routing(void)
                printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
        }
 
+       if (is_vsmp_box()) {
+               /* need to update phys_pkg_id */
+               apic->phys_pkg_id = apicid_phys_pkg_id;
+       }
+
        /*
         * Now that apic routing model is selected, configure the
         * fault handling for intr remapping.
index 4c578751e94ec08dc4e6ed69338947105e444654..81e58238c4ce642ed54ea3ce63c265427e1d6984 100644 (file)
@@ -869,6 +869,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
        }
 
 #ifdef CONFIG_X86_32
index e90540a46a0bca9ed06d03672010eb19ef648b24..eb33aaa8415de0d7b4e13d7dd18c826829a59466 100644 (file)
@@ -215,6 +215,7 @@ static __init void xen_init_cpuid_mask(void)
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
 
        ax = 1;
+       cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
 
        /* cpuid claims we support xsave; try enabling it to see what happens */
@@ -1059,6 +1060,7 @@ asmlinkage void __init xen_start_kernel(void)
        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
        new_cpu_data.hard_math = 1;
+       new_cpu_data.wp_works_ok = 1;
        new_cpu_data.x86_capability[0] = cpuid_edx(1);
 #endif
 
index 418d63619680e8df2b9ab2ba0cdd5f31bcfb4ee4..d3aa2aadb3e0e7d18645a2eb9333698f4984263a 100644 (file)
@@ -133,7 +133,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                return -EINVAL;
 
        spin_lock_irq(q->queue_lock);
-       blk_queue_max_sectors(q, max_sectors_kb << 1);
+       q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);
 
        return ret;
index 56c62e2858d56a9eee11b9167fe8362a71a7c6c1..df0863d56995d7ca02c1e9492d3ab74de164ab14 100644 (file)
@@ -692,7 +692,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
 {
        struct list_head *request;
 
@@ -707,7 +707,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
        request = queue->list.next;
        list_del(request);
 
-       return list_entry(request, struct crypto_async_request, list);
+       return (char *)list_entry(request, struct crypto_async_request, list) -
+              offset;
+}
+EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
+
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+{
+       return __crypto_dequeue_request(queue, 0);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
 
index 67340cc70142209d1aa97ae360b21587cefc321b..257706e7734f786dbe8e8d760283ef2751859838 100644 (file)
@@ -70,6 +70,12 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
 
        ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
 
+       /* If Source and Target are the same, just return */
+
+       if (source_desc == target_desc) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
        /* We know that source_desc is a buffer by now */
 
        buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
@@ -161,6 +167,12 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
 
        ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
 
+       /* If Source and Target are the same, just return */
+
+       if (source_desc == target_desc) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
        /* We know that source_desc is a string by now */
 
        buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
index 8851315ce858a2e2eb1e4ffd2d2b6b695fa8bfb8..60ea984c84a02c651b606d6d42aa4b3610c21882 100644 (file)
@@ -2004,8 +2004,11 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
        status = acpi_remove_notify_handler(device->dev->handle,
                                            ACPI_DEVICE_NOTIFY,
                                            acpi_video_device_notify);
-       sysfs_remove_link(&device->backlight->dev.kobj, "device");
-       backlight_device_unregister(device->backlight);
+       if (device->backlight) {
+               sysfs_remove_link(&device->backlight->dev.kobj, "device");
+               backlight_device_unregister(device->backlight);
+               device->backlight = NULL;
+       }
        if (device->cdev) {
                sysfs_remove_link(&device->dev->dev.kobj,
                                  "thermal_cooling");
index 56b8a3ff12865041af813ca0bcfa49db143e5169..9ac4e378992ef60d71634121379b91479c8660f2 100644 (file)
@@ -664,6 +664,8 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
        return ata_sff_prereset(link, deadline);
 }
 
+static DEFINE_SPINLOCK(piix_lock);
+
 /**
  *     piix_set_piomode - Initialize host controller PATA PIO timings
  *     @ap: Port whose timings we are configuring
@@ -677,8 +679,9 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
 
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 {
-       unsigned int pio        = adev->pio_mode - XFER_PIO_0;
        struct pci_dev *dev     = to_pci_dev(ap->host->dev);
+       unsigned long flags;
+       unsigned int pio        = adev->pio_mode - XFER_PIO_0;
        unsigned int is_slave   = (adev->devno != 0);
        unsigned int master_port= ap->port_no ? 0x42 : 0x40;
        unsigned int slave_port = 0x44;
@@ -708,6 +711,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
        if (adev->class == ATA_DEV_ATA)
                control |= 4;   /* PPE enable */
 
+       spin_lock_irqsave(&piix_lock, flags);
+
        /* PIO configuration clears DTE unconditionally.  It will be
         * programmed in set_dmamode which is guaranteed to be called
         * after set_piomode if any DMA mode is available.
@@ -747,6 +752,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
                udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
                pci_write_config_byte(dev, 0x48, udma_enable);
        }
+
+       spin_unlock_irqrestore(&piix_lock, flags);
 }
 
 /**
@@ -764,6 +771,7 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
 {
        struct pci_dev *dev     = to_pci_dev(ap->host->dev);
+       unsigned long flags;
        u8 master_port          = ap->port_no ? 0x42 : 0x40;
        u16 master_data;
        u8 speed                = adev->dma_mode;
@@ -777,6 +785,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
                            { 2, 1 },
                            { 2, 3 }, };
 
+       spin_lock_irqsave(&piix_lock, flags);
+
        pci_read_config_word(dev, master_port, &master_data);
        if (ap->udma_mask)
                pci_read_config_byte(dev, 0x48, &udma_enable);
@@ -867,6 +877,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
        /* Don't scribble on 0x48 if the controller does not support UDMA */
        if (ap->udma_mask)
                pci_write_config_byte(dev, 0x48, udma_enable);
+
+       spin_unlock_irqrestore(&piix_lock, flags);
 }
 
 /**
index 5e41e6dd657b9a7326ba1347346c8147c428f3c2..db195abad69889e4d499bde162ff5e818601f3b2 100644 (file)
@@ -155,7 +155,7 @@ struct aoedev {
        u16 fw_ver;             /* version of blade's firmware */
        struct work_struct work;/* disk create work struct */
        struct gendisk *gd;
-       struct request_queue blkq;
+       struct request_queue *blkq;
        struct hd_geometry geo; 
        sector_t ssize;
        struct timer_list timer;
index 2307a271bdc99e91b5c410083383a0595c4c08d0..1e15889c4b9819f837076c1b17df34fc8ef881c2 100644 (file)
@@ -264,9 +264,12 @@ aoeblk_gdalloc(void *vp)
                goto err_disk;
        }
 
-       blk_queue_make_request(&d->blkq, aoeblk_make_request);
-       if (bdi_init(&d->blkq.backing_dev_info))
+       d->blkq = blk_alloc_queue(GFP_KERNEL);
+       if (!d->blkq)
                goto err_mempool;
+       blk_queue_make_request(d->blkq, aoeblk_make_request);
+       if (bdi_init(&d->blkq->backing_dev_info))
+               goto err_blkq;
        spin_lock_irqsave(&d->lock, flags);
        gd->major = AOE_MAJOR;
        gd->first_minor = d->sysminor * AOE_PARTITIONS;
@@ -276,7 +279,7 @@ aoeblk_gdalloc(void *vp)
        snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
                d->aoemajor, d->aoeminor);
 
-       gd->queue = &d->blkq;
+       gd->queue = d->blkq;
        d->gd = gd;
        d->flags &= ~DEVFL_GDALLOC;
        d->flags |= DEVFL_UP;
@@ -287,6 +290,9 @@ aoeblk_gdalloc(void *vp)
        aoedisk_add_sysfs(d);
        return;
 
+err_blkq:
+       blk_cleanup_queue(d->blkq);
+       d->blkq = NULL;
 err_mempool:
        mempool_destroy(d->bufpool);
 err_disk:
index eeea477d96016596ccd729a6f75d37d5e3d30a0e..fa67027789aab80ca8c11deccad4e2d3ec225697 100644 (file)
@@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d)
        if (d->bufpool)
                mempool_destroy(d->bufpool);
        skbpoolfree(d);
+       blk_cleanup_queue(d->blkq);
        kfree(d);
 }
 
index 8c9d50db5c3a7913fba96958a4a1a433cde90330..c58557790585dfe6a0d56dba70fabb3764a3e2c6 100644 (file)
@@ -49,6 +49,7 @@
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB     0x0040
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG     0x0042
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB     0x0044
+#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB            0x0062
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG     0x0046
 
 /* cover 915 and 945 variants */
@@ -81,7 +82,8 @@
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
 
 extern int agp_memory_reserved;
 
@@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
        case PCI_DEVICE_ID_INTEL_G41_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
+       case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        default:
@@ -2195,6 +2198,8 @@ static const struct intel_driver_description {
            "IGDNG/D", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
            "IGDNG/M", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+           "IGDNG/MA", NULL, &intel_i965_driver },
        { 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_G41_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
        { }
 };
 
index 973be2f441951ed0e68d658c1192c94524f33aff..4e28b35024ece708161ec948f37b926fc8455c1b 100644 (file)
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
                        if (space < 2)
                                return -1;
                        tty->canon_column = tty->column = 0;
-                       tty_put_char(tty, '\r');
-                       tty_put_char(tty, c);
+                       tty->ops->write(tty, "\r\n", 2);
                        return 2;
                }
                tty->canon_column = tty->column;
index d083c73d784a76a34500c26cc582f3d68770be72..b33d6688e9109a31bb27a6b297a73a8e69c5989d 100644 (file)
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
  *     the other side of the pty/tty pair.
  */
 
-static int pty_write(struct tty_struct *tty, const unsigned char *buf,
-                                                               int count)
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
        struct tty_struct *to = tty->link;
-       int c;
 
        if (tty->stopped)
                return 0;
 
-       /* This isn't locked but our 8K is quite sloppy so no
-          big deal */
-
-       c = pty_space(to);
-       if (c > count)
-               c = count;
        if (c > 0) {
                /* Stuff the data into the input queue of the other end */
                c = tty_insert_flip_string(to, buf, c);
index aec1931608aa28e3645463735c299805a50ab2b8..0b73e4ec1addafff42a79752a666fecd4b25d665 100644 (file)
@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
                goto out_err;
        }
 
+       /* Default timeouts */
+       chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+       chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+       chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+       chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 
        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
 
-       /* Default timeouts */
-       chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-       chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
-       chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-       chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-
        dev_info(dev,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
index fd69086d08d54f0fb9c2036984cc9c9c88d86446..2968ed6a9c4997003591a9ebc33546e19d6deb0b 100644 (file)
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
        int ret = 0;
 
-#ifdef __powerpc__
        int cpu = sysdev->id;
-       unsigned int cur_freq = 0;
        struct cpufreq_policy *cpu_policy;
 
        dprintk("suspending cpu %u\n", cpu);
 
-       /*
-        * This whole bogosity is here because Powerbooks are made of fail.
-        * No sane platform should need any of the code below to be run.
-        * (it's entirely the wrong thing to do, as driver->get may
-        *  reenable interrupts on some architectures).
-        */
-
        if (!cpu_online(cpu))
                return 0;
 
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(cpu_policy, pmsg);
-               if (ret) {
+               if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", cpu_policy->cpu);
-                       goto out;
-               }
-       }
-
-       if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
-               goto out;
-
-       if (cpufreq_driver->get)
-               cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-       if (!cur_freq || !cpu_policy->cur) {
-               printk(KERN_ERR "cpufreq: suspend failed to assert current "
-                      "frequency is what timing core thinks it is.\n");
-               goto out;
-       }
-
-       if (unlikely(cur_freq != cpu_policy->cur)) {
-               struct cpufreq_freqs freqs;
-
-               if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-                       dprintk("Warning: CPU frequency is %u, "
-                              "cpufreq assumed %u kHz.\n",
-                              cur_freq, cpu_policy->cur);
-
-               freqs.cpu = cpu;
-               freqs.old = cpu_policy->cur;
-               freqs.new = cur_freq;
-
-               srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-                                   CPUFREQ_SUSPENDCHANGE, &freqs);
-               adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
-
-               cpu_policy->cur = cur_freq;
        }
 
 out:
        cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
        return ret;
 }
 
@@ -1330,24 +1287,21 @@ out:
  *     cpufreq_resume -  restore proper CPU frequency handling after resume
  *
  *     1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *     2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
- *     3.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *         restored.
+ *     2.) schedule call cpufreq_update_policy() ASAP as interrupts are
+ *         restored. It will verify that the current freq is in sync with
+ *         what we believe it to be. This is a bit later than when it
+ *         should be, but nonetheless it's better than calling
+ *         cpufreq_driver->get() here which might re-enable interrupts...
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
        int ret = 0;
 
-#ifdef __powerpc__
        int cpu = sysdev->id;
        struct cpufreq_policy *cpu_policy;
 
        dprintk("resuming cpu %u\n", cpu);
 
-       /* As with the ->suspend method, all the code below is
-        * only necessary because Powerbooks suck.
-        * See commit 42d4dc3f4e1e for jokes. */
-
        if (!cpu_online(cpu))
                return 0;
 
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
                }
        }
 
-       if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
-               unsigned int cur_freq = 0;
-
-               if (cpufreq_driver->get)
-                       cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-               if (!cur_freq || !cpu_policy->cur) {
-                       printk(KERN_ERR "cpufreq: resume failed to assert "
-                                       "current frequency is what timing core "
-                                       "thinks it is.\n");
-                       goto out;
-               }
-
-               if (unlikely(cur_freq != cpu_policy->cur)) {
-                       struct cpufreq_freqs freqs;
-
-                       if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-                               dprintk("Warning: CPU frequency "
-                                      "is %u, cpufreq assumed %u kHz.\n",
-                                      cur_freq, cpu_policy->cur);
-
-                       freqs.cpu = cpu;
-                       freqs.old = cpu_policy->cur;
-                       freqs.new = cur_freq;
-
-                       srcu_notifier_call_chain(
-                                       &cpufreq_transition_notifier_list,
-                                       CPUFREQ_RESUMECHANGE, &freqs);
-                       adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
-
-                       cpu_policy->cur = cur_freq;
-               }
-       }
-
-out:
        schedule_work(&cpu_policy->update);
+
 fail:
        cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
        return ret;
 }
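
Note: the new comment spells out the design: rather than calling cpufreq_driver->get() in the resume path (which might re-enable interrupts), the frequency check is deferred to cpufreq_update_policy() via schedule_work(). A generic sketch of that deferral pattern, with illustrative names rather than the cpufreq ones:

        #include <linux/workqueue.h>

        static void update_fn(struct work_struct *work)
        {
                /* runs later in process context, with interrupts properly enabled */
        }

        static DECLARE_WORK(update_work, update_fn);

        static int my_resume(void)
        {
                /* do only the bare minimum here; defer the heavy lifting */
                schedule_work(&update_work);
                return 0;
        }
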
 
index 110e731f5574130cb69338a99aab0c8ec8a16916..1c0b504a42f3068e852fa1b7423ee5c37bfb0abd 100644 (file)
@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
-                               data, sizeof(data))) {
+                               data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all bandwidth. */
                        return allocate ? -EAGAIN : bandwidth;
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                data[1] = old ^ c;
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                           irm_id, generation, SCODE_100,
-                                          offset, data, sizeof(data))) {
+                                          offset, data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all channels. */
                        return allocate ? -EAGAIN : i;
index ecddd11b797a366d105763656dd4eda2c2273add..76b321bb73f9419aa52c826a944047388d53cd0a 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/pci_ids.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev)
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
+#define PCI_VENDOR_ID_AGERE            PCI_VENDOR_ID_ATT
+#define PCI_DEVICE_ID_AGERE_FW643      0x5901
+
 static int __devinit pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *ent)
 {
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev,
        version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
 
+       /* dual-buffer mode is broken if more than one IR context is active */
+       if (dev->vendor == PCI_VENDOR_ID_AGERE &&
+           dev->device == PCI_DEVICE_ID_AGERE_FW643)
+               ohci->use_dualbuffer = false;
+
+       /* dual-buffer mode is broken */
+       if (dev->vendor == PCI_VENDOR_ID_RICOH &&
+           dev->device == PCI_DEVICE_ID_RICOH_R5C832)
+               ohci->use_dualbuffer = false;
+
 /* x86-32 currently doesn't use highmem for dma_alloc_coherent */
 #if !defined(CONFIG_X86_32)
        /* dual-buffer mode is broken with descriptor addresses above 2G */
index 8d51568ee14344ee1e9e4ac11f690ae7a2c1d6a2..e5df822a8130ca99730a009c75cf3ea454bfccab 100644 (file)
@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
        }
        spin_unlock_irqrestore(&card->lock, flags);
 
-       if (&orb->link != &lu->orb_list)
+       if (&orb->link != &lu->orb_list) {
                orb->callback(orb, &status);
-       else
+               kref_put(&orb->kref, free_orb);
+       } else {
                fw_error("status write for unknown orb\n");
-
-       kref_put(&orb->kref, free_orb);
+       }
 
        fw_send_response(card, request, RCODE_COMPLETE);
 }
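
Note: the reordering above ties the kref_put() to the case where the orb was actually found on lu->orb_list and its callback invoked, so the reference is dropped exactly once. A minimal sketch of the kref pattern involved (struct layout and helper names are illustrative, not sbp2's):

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct my_orb {
                struct kref kref;
                /* ... request state ... */
        };

        static void free_my_orb(struct kref *kref)
        {
                kfree(container_of(kref, struct my_orb, kref));
        }

        static void orb_submit(struct my_orb *orb)
        {
                kref_get(&orb->kref);   /* reference held while the request is outstanding */
        }

        static void orb_complete(struct my_orb *orb)
        {
                kref_put(&orb->kref, free_my_orb);   /* dropped once, when status is matched */
        }
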
index 7537f57d8a87399b43079c37adbb843970bd7b1d..5b4f87e556218e0574fb3a57f3e084fbaaa7a251 100644 (file)
@@ -222,6 +222,7 @@ typedef struct drm_i915_private {
        unsigned int edp_support:1;
        int lvds_ssc_freq;
 
+       int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
        struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -384,6 +385,9 @@ typedef struct drm_i915_private {
                 */
                struct list_head inactive_list;
 
+               /** LRU list of objects with fence regs on them. */
+               struct list_head fence_list;
+
                /**
                 * List of breadcrumbs associated with GPU requests currently
                 * outstanding.
@@ -451,6 +455,9 @@ struct drm_i915_gem_object {
        /** This object's place on the active/flushing/inactive lists */
        struct list_head list;
 
+       /** This object's place on the fenced object LRU */
+       struct list_head fence_list;
+
        /**
         * This is set if the object is on the active or flushing lists
         * (has pending rendering), and is not set if it's on inactive (ready
index 140bee142fc253186f80f2cdc8a4d339786efe20..80e5ba490dc28c8a15c4619865f4872f82a6625e 100644 (file)
@@ -978,6 +978,7 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        uint32_t read_domains = args->read_domains;
@@ -1010,8 +1011,18 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                 obj, obj->size, read_domains, write_domain);
 #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
+               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+               /* Update the LRU on the fence for the CPU access that's
+                * about to occur.
+                */
+               if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                       list_move_tail(&obj_priv->fence_list,
+                                      &dev_priv->mm.fence_list);
+               }
+
                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
@@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Need a new fence register? */
-       if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
@@ -2208,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
        struct drm_i915_gem_object *old_obj_priv = NULL;
        int i, ret, avail;
 
+       /* Just update our place in the LRU if our fence is getting used. */
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               return 0;
+       }
+
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
@@ -2229,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
        }
 
        /* First try to find a free reg */
-try_again:
        avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
@@ -2243,63 +2258,62 @@ try_again:
 
        /* None available, try to steal one or wait for a user to finish */
        if (i == dev_priv->num_fence_regs) {
-               uint32_t seqno = dev_priv->mm.next_gem_seqno;
+               struct drm_gem_object *old_obj = NULL;
 
                if (avail == 0)
                        return -ENOSPC;
 
-               for (i = dev_priv->fence_reg_start;
-                    i < dev_priv->num_fence_regs; i++) {
-                       uint32_t this_seqno;
-
-                       reg = &dev_priv->fence_regs[i];
-                       old_obj_priv = reg->obj->driver_private;
+               list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+                                   fence_list) {
+                       old_obj = old_obj_priv->obj;
 
                        if (old_obj_priv->pin_count)
                                continue;
 
+                       /* Take a reference, as otherwise the wait_rendering
+                        * below may cause the object to get freed out from
+                        * under us.
+                        */
+                       drm_gem_object_reference(old_obj);
+
                        /* i915 uses fences for GPU access to tiled buffers */
                        if (IS_I965G(dev) || !old_obj_priv->active)
                                break;
 
-                       /* find the seqno of the first available fence */
-                       this_seqno = old_obj_priv->last_rendering_seqno;
-                       if (this_seqno != 0 &&
-                           reg->obj->write_domain == 0 &&
-                           i915_seqno_passed(seqno, this_seqno))
-                               seqno = this_seqno;
-               }
-
-               /*
-                * Now things get ugly... we have to wait for one of the
-                * objects to finish before trying again.
-                */
-               if (i == dev_priv->num_fence_regs) {
-                       if (seqno == dev_priv->mm.next_gem_seqno) {
-                               i915_gem_flush(dev,
-                                              I915_GEM_GPU_DOMAINS,
-                                              I915_GEM_GPU_DOMAINS);
-                               seqno = i915_add_request(dev, NULL,
-                                                        I915_GEM_GPU_DOMAINS);
-                               if (seqno == 0)
-                                       return -ENOMEM;
+                       /* This brings the object to the head of the LRU if it
+                        * had been written to.  The only way this should
+                        * result in us waiting longer than the expected
+                        * optimal amount of time is if there was a
+                        * fence-using buffer later that was read-only.
+                        */
+                       i915_gem_object_flush_gpu_write_domain(old_obj);
+                       ret = i915_gem_object_wait_rendering(old_obj);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(old_obj);
+                               return ret;
                        }
 
-                       ret = i915_wait_request(dev, seqno);
-                       if (ret)
-                               return ret;
-                       goto try_again;
+                       break;
                }
 
                /*
                 * Zap this virtual mapping so we can set up a fence again
                 * for this object next time we need it.
                 */
-               i915_gem_release_mmap(reg->obj);
+               i915_gem_release_mmap(old_obj);
+
+               i = old_obj_priv->fence_reg;
+               reg = &dev_priv->fence_regs[i];
+
                old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+               list_del_init(&old_obj_priv->fence_list);
+
+               drm_gem_object_unreference(old_obj);
        }
 
        obj_priv->fence_reg = i;
+       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
        reg->obj = obj;
 
        if (IS_I965G(dev))
@@ -2342,6 +2356,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
        dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       list_del_init(&obj_priv->fence_list);
 }
 
 /**
@@ -3595,9 +3610,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
         * Pre-965 chips need a fence register set up in order to
         * properly handle tiled surfaces.
         */
-       if (!IS_I965G(dev) &&
-           obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
                        if (ret != -EBUSY && ret != -ERESTARTSYS)
@@ -3806,6 +3819,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+       INIT_LIST_HEAD(&obj_priv->fence_list);
 
        return 0;
 }
@@ -4218,15 +4232,11 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       int ret;
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
-
-       return ret;
+       return i915_gem_idle(dev);
 }
 
 void
@@ -4253,6 +4263,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
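
Note: the fence-register rework above replaces the seqno scan with a simple LRU built on struct list_head: an object's fence_list node is moved to the tail of mm.fence_list whenever its fence is used, and the steal path walks from the head looking for an unpinned victim. A minimal, generic sketch of that list-based LRU idiom (field and variable names are illustrative):

        #include <linux/list.h>

        struct lru_obj {
                struct list_head node;
                int pinned;
        };

        static LIST_HEAD(lru);          /* head = least recently used */

        static void lru_touch(struct lru_obj *obj)
        {
                list_move_tail(&obj->node, &lru);       /* most recent at the tail */
        }

        static struct lru_obj *lru_pick_victim(void)
        {
                struct lru_obj *obj;

                list_for_each_entry(obj, &lru, node)
                        if (!obj->pinned)
                                return obj;             /* oldest unpinned entry */
                return NULL;
        }
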
index 300aee3296c2435545702030d6e66405759e5963..f806fcc54e09d3ad5a6e262b92530d6900a8afb6 100644 (file)
@@ -59,6 +59,16 @@ find_section(struct bdb_header *bdb, int section_id)
        return NULL;
 }
 
+static u16
+get_blocksize(void *p)
+{
+       u16 *block_ptr, block_size;
+
+       block_ptr = (u16 *)((char *)p - 2);
+       block_size = *block_ptr;
+       return block_size;
+}
+
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
                        struct lvds_dvo_timing *dvo_timing)
@@ -214,6 +224,41 @@ parse_general_features(struct drm_i915_private *dev_priv,
        }
 }
 
+static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+                         struct bdb_header *bdb)
+{
+       struct bdb_general_definitions *general;
+       const int crt_bus_map_table[] = {
+               GPIOB,
+               GPIOA,
+               GPIOC,
+               GPIOD,
+               GPIOE,
+               GPIOF,
+       };
+
+       /* Set sensible defaults in case we can't find the general block
+          or it is the wrong chipset */
+       dev_priv->crt_ddc_bus = -1;
+
+       general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+       if (general) {
+               u16 block_size = get_blocksize(general);
+               if (block_size >= sizeof(*general)) {
+                       int bus_pin = general->crt_ddc_gmbus_pin;
+                       DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+                       if ((bus_pin >= 1) && (bus_pin <= 6)) {
+                               dev_priv->crt_ddc_bus =
+                                       crt_bus_map_table[bus_pin-1];
+                       }
+               } else {
+                       DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+                                 block_size);
+               }
+       }
+}
+
 static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                       struct bdb_header *bdb)
@@ -222,7 +267,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
        struct bdb_general_definitions *p_defs;
        struct child_device_config *p_child;
        int i, child_device_num, count;
-       u16     block_size, *block_ptr;
+       u16     block_size;
 
        p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!p_defs) {
@@ -240,8 +285,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                return;
        }
        /* get the block size of general definitions */
-       block_ptr = (u16 *)((char *)p_defs - 2);
-       block_size = *block_ptr;
+       block_size = get_blocksize(p_defs);
        /* get the number of child device */
        child_device_num = (block_size - sizeof(*p_defs)) /
                                sizeof(*p_child);
@@ -362,6 +406,7 @@ intel_init_bios(struct drm_device *dev)
 
        /* Grab useful general definitions */
        parse_general_features(dev_priv, bdb);
+       parse_general_definitions(dev_priv, bdb);
        parse_lfp_panel_data(dev_priv, bdb);
        parse_sdvo_panel_data(dev_priv, bdb);
        parse_sdvo_device_mapping(dev_priv, bdb);
index 4cf8e2e88a40eb56aae2052fedab7af3c03b45c4..590f81c8f59482b6e702d8b8191211e79cd8987b 100644 (file)
@@ -508,6 +508,7 @@ void intel_crt_init(struct drm_device *dev)
 {
        struct drm_connector *connector;
        struct intel_output *intel_output;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 i2c_reg;
 
        intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
@@ -527,8 +528,12 @@ void intel_crt_init(struct drm_device *dev)
        /* Set up the DDC bus. */
        if (IS_IGDNG(dev))
                i2c_reg = PCH_GPIOA;
-       else
+       else {
                i2c_reg = GPIOA;
+               /* Use VBT information for CRT DDC if available */
+               if (dev_priv->crt_ddc_bus != -1)
+                       i2c_reg = dev_priv->crt_ddc_bus;
+       }
        intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
        if (!intel_output->ddc_bus) {
                dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
@@ -537,6 +542,10 @@ void intel_crt_init(struct drm_device *dev)
        }
 
        intel_output->type = INTEL_OUTPUT_ANALOG;
+       intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                  (1 << INTEL_ANALOG_CLONE_BIT) |
+                                  (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+       intel_output->crtc_mask = (1 << 0) | (1 << 1);
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
index d6fce2133413e5431bf6abda64f9e260cfca8d77..748ed50c55ca9c67683dcbdca233f6fb4aaa6e41 100644 (file)
@@ -666,7 +666,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
        intel_clock_t clock;
        int err = target;
 
-       if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            (I915_READ(LVDS)) != 0) {
                /*
                 * For LVDS, if the panel is on, just rely on its current
@@ -2005,7 +2005,21 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
        return;
 }
 
-const static int latency_ns = 3000; /* default for non-igd platforms */
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ *   - memory configuration (speed, channels)
+ *   - chipset
+ *   - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value.  It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+const static int latency_ns = 5000;
 
 static int intel_get_fifo_size(struct drm_device *dev, int plane)
 {
@@ -2396,7 +2410,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                if (is_sdvo) {
                        dpll |= DPLL_DVO_HIGH_SPEED;
                        sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-                       if (IS_I945G(dev) || IS_I945GM(dev))
+                       if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                                dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
                        else if (IS_IGDNG(dev))
                                dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
@@ -3170,7 +3184,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
 
         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct intel_output *intel_output = to_intel_output(connector);
-               if (type_mask & (1 << intel_output->type))
+               if (type_mask & intel_output->clone_mask)
                        index_mask |= (1 << entry);
                entry++;
        }
@@ -3218,30 +3232,30 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_dp_init(dev, PCH_DP_D);
 
        } else if (IS_I9XX(dev)) {
-               int found;
-               u32 reg;
+               bool found = false;
 
                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        found = intel_sdvo_init(dev, SDVOB);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
                                intel_hdmi_init(dev, SDVOB);
+
                        if (!found && SUPPORTS_INTEGRATED_DP(dev))
                                intel_dp_init(dev, DP_B);
                }
 
                /* Before G4X SDVOC doesn't have its own detect register */
-               if (IS_G4X(dev))
-                       reg = SDVOC;
-               else
-                       reg = SDVOB;
 
-               if (I915_READ(reg) & SDVO_DETECTED) {
+               if (I915_READ(SDVOB) & SDVO_DETECTED)
                        found = intel_sdvo_init(dev, SDVOC);
-                       if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+
+               if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+
+                       if (SUPPORTS_INTEGRATED_HDMI(dev))
                                intel_hdmi_init(dev, SDVOC);
-                       if (!found && SUPPORTS_INTEGRATED_DP(dev))
+                       if (SUPPORTS_INTEGRATED_DP(dev))
                                intel_dp_init(dev, DP_C);
                }
+
                if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev, DP_D);
        } else
@@ -3253,51 +3267,10 @@ static void intel_setup_outputs(struct drm_device *dev)
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct intel_output *intel_output = to_intel_output(connector);
                struct drm_encoder *encoder = &intel_output->enc;
-               int crtc_mask = 0, clone_mask = 0;
 
-               /* valid crtcs */
-               switch(intel_output->type) {
-               case INTEL_OUTPUT_HDMI:
-                       crtc_mask = ((1 << 0)|
-                                    (1 << 1));
-                       clone_mask = ((1 << INTEL_OUTPUT_HDMI));
-                       break;
-               case INTEL_OUTPUT_DVO:
-               case INTEL_OUTPUT_SDVO:
-                       crtc_mask = ((1 << 0)|
-                                    (1 << 1));
-                       clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
-                                     (1 << INTEL_OUTPUT_DVO) |
-                                     (1 << INTEL_OUTPUT_SDVO));
-                       break;
-               case INTEL_OUTPUT_ANALOG:
-                       crtc_mask = ((1 << 0)|
-                                    (1 << 1));
-                       clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
-                                     (1 << INTEL_OUTPUT_DVO) |
-                                     (1 << INTEL_OUTPUT_SDVO));
-                       break;
-               case INTEL_OUTPUT_LVDS:
-                       crtc_mask = (1 << 1);
-                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
-                       break;
-               case INTEL_OUTPUT_TVOUT:
-                       crtc_mask = ((1 << 0) |
-                                    (1 << 1));
-                       clone_mask = (1 << INTEL_OUTPUT_TVOUT);
-                       break;
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       crtc_mask = ((1 << 0) |
-                                    (1 << 1));
-                       clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
-                       break;
-               case INTEL_OUTPUT_EDP:
-                       crtc_mask = (1 << 1);
-                       clone_mask = (1 << INTEL_OUTPUT_EDP);
-                       break;
-               }
-               encoder->possible_crtcs = crtc_mask;
-               encoder->possible_clones = intel_connector_clones(dev, clone_mask);
+               encoder->possible_crtcs = intel_output->crtc_mask;
+               encoder->possible_clones = intel_connector_clones(dev,
+                                               intel_output->clone_mask);
        }
 }
 
index a6ff15ac548aa9074eec2647ce377a2290dc58c0..2b914d73207681b15fec693677fa2866d437b77f 100644 (file)
@@ -1254,6 +1254,18 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        else
                intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
 
+       if (output_reg == DP_B)
+               intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
+       else if (output_reg == DP_C)
+               intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
+       else if (output_reg == DP_D)
+               intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+
+       if (IS_eDP(intel_output)) {
+               intel_output->crtc_mask = (1 << 1);
+               intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+       } else
+               intel_output->crtc_mask = (1 << 0) | (1 << 1);
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
index d6f92ea1b5538fc0928119b600f4eaa241f66ada..26a6227c15fe7c0f20f9f93594ba93254b7b3301 100644 (file)
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
 
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
@@ -86,6 +105,8 @@ struct intel_output {
        bool needs_tv_clock;
        void *dev_priv;
        void (*hot_plug)(struct intel_output *);
+       int crtc_mask;
+       int clone_mask;
 };
 
 struct intel_crtc {
index 13bff20930e89f9a5bd2d83b3a4dace630252cf5..a4d2606de778c3d419713c3d8552290d828826c0 100644 (file)
@@ -435,14 +435,20 @@ void intel_dvo_init(struct drm_device *dev)
                        continue;
 
                intel_output->type = INTEL_OUTPUT_DVO;
+               intel_output->crtc_mask = (1 << 0) | (1 << 1);
                switch (dvo->type) {
                case INTEL_DVO_CHIP_TMDS:
+                       intel_output->clone_mask =
+                               (1 << INTEL_DVO_TMDS_CLONE_BIT) |
+                               (1 << INTEL_ANALOG_CLONE_BIT);
                        drm_connector_init(dev, connector,
                                           &intel_dvo_connector_funcs,
                                           DRM_MODE_CONNECTOR_DVII);
                        encoder_type = DRM_MODE_ENCODER_TMDS;
                        break;
                case INTEL_DVO_CHIP_LVDS:
+                       intel_output->clone_mask =
+                               (1 << INTEL_DVO_LVDS_CLONE_BIT);
                        drm_connector_init(dev, connector,
                                           &intel_dvo_connector_funcs,
                                           DRM_MODE_CONNECTOR_LVDS);
index 1842290cded3f074883a155269ceed3ae2e0b749..fa304e136010b0cef680590adbfc606b1776dcfa 100644 (file)
@@ -230,22 +230,28 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
+       intel_output->crtc_mask = (1 << 0) | (1 << 1);
 
        /* Set up the DDC bus. */
-       if (sdvox_reg == SDVOB)
+       if (sdvox_reg == SDVOB) {
+               intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
                intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
-       else if (sdvox_reg == SDVOC)
+       } else if (sdvox_reg == SDVOC) {
+               intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
                intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
-       else if (sdvox_reg == HDMIB)
+       } else if (sdvox_reg == HDMIB) {
+               intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
                intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
                                                                "HDMIB");
-       else if (sdvox_reg == HDMIC)
+       } else if (sdvox_reg == HDMIC) {
+               intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
                intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
                                                                "HDMIC");
-       else if (sdvox_reg == HDMID)
+       } else if (sdvox_reg == HDMID) {
+               intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
                intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
                                                                "HDMID");
-
+       }
        if (!intel_output->ddc_bus)
                goto err_connector;
 
index 3f445a80c552908b251cfa9d0d688c653a2f10ab..8df02ef892617ed7a47b8ab9d88665b675cbb046 100644 (file)
@@ -916,6 +916,8 @@ void intel_lvds_init(struct drm_device *dev)
        drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
        intel_output->type = INTEL_OUTPUT_LVDS;
 
+       intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+       intel_output->crtc_mask = (1 << 1);
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
index 5371d9332554fe9716bb5bd8403bb177b4829fd3..d3b74ba62b4a07bb1702eca5e62f28dcea017ade 100644 (file)
@@ -1458,7 +1458,7 @@ intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
                (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
                caps++;
        if (sdvo_priv->caps.output_flags &
-               (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID0))
+               (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
                caps++;
        if (sdvo_priv->caps.output_flags &
                (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
@@ -1967,6 +1967,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                        intel_sdvo_set_colorimetry(intel_output,
                                                   SDVO_COLORIMETRY_RGB256);
                        connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+                       intel_output->clone_mask =
+                                       (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                       (1 << INTEL_ANALOG_CLONE_BIT);
                }
        } else if (flags & SDVO_OUTPUT_SVID0) {
 
@@ -1975,11 +1978,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
                sdvo_priv->is_tv = true;
                intel_output->needs_tv_clock = true;
+               intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
        } else if (flags & SDVO_OUTPUT_RGB0) {
 
                sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
                encoder->encoder_type = DRM_MODE_ENCODER_DAC;
                connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+               intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                       (1 << INTEL_ANALOG_CLONE_BIT);
        } else if (flags & SDVO_OUTPUT_RGB1) {
 
                sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
@@ -1991,12 +1997,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
                connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
                sdvo_priv->is_lvds = true;
+               intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+                                       (1 << INTEL_SDVO_LVDS_CLONE_BIT);
        } else if (flags & SDVO_OUTPUT_LVDS1) {
 
                sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
                encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
                connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
                sdvo_priv->is_lvds = true;
+               intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+                                       (1 << INTEL_SDVO_LVDS_CLONE_BIT);
        } else {
 
                unsigned char bytes[2];
@@ -2009,6 +2019,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                                  bytes[0], bytes[1]);
                ret = false;
        }
+       intel_output->crtc_mask = (1 << 0) | (1 << 1);
 
        if (ret && registered)
                ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
index da4ab4dc16306b6fc4b7727da0954f7736a290cb..5b1c9e9fdba04b89044b5131b00477c66b230c41 100644 (file)
@@ -1718,6 +1718,7 @@ intel_tv_init(struct drm_device *dev)
        if (!intel_output) {
                return;
        }
+
        connector = &intel_output->base;
 
        drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1729,6 +1730,8 @@ intel_tv_init(struct drm_device *dev)
        drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
        tv_priv = (struct intel_tv_priv *)(intel_output + 1);
        intel_output->type = INTEL_OUTPUT_TVOUT;
+       intel_output->crtc_mask = (1 << 0) | (1 << 1);
+       intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
        intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
        intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
        intel_output->dev_priv = tv_priv;
index 053f4ec397f76b075e96833d46a1e62bc37acad7..051bca6e3a4f2981d1f580d7bbc174c43ac586d3 100644 (file)
@@ -995,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
+       0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
 };
 
 static int r300_packet0_check(struct radeon_cs_parser *p,
index 7ca6c13569b5045a7ed3c71fb642e913e33604cf..93d8f88893024088624e007e36ec2bb9b106eeed 100644 (file)
@@ -266,6 +266,7 @@ static struct radeon_asic rs400_asic = {
 /*
  * rs600.
  */
+int rs600_init(struct radeon_device *dev);
 void rs600_errata(struct radeon_device *rdev);
 void rs600_vram_info(struct radeon_device *rdev);
 int rs600_mc_init(struct radeon_device *rdev);
@@ -281,7 +282,7 @@ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rs600_asic = {
-       .init = &r300_init,
+       .init = &rs600_init,
        .errata = &rs600_errata,
        .vram_info = &rs600_vram_info,
        .gpu_reset = &r300_gpu_reset,
@@ -316,7 +317,6 @@ static struct radeon_asic rs600_asic = {
 /*
  * rs690,rs740
  */
-int rs690_init(struct radeon_device *rdev);
 void rs690_errata(struct radeon_device *rdev);
 void rs690_vram_info(struct radeon_device *rdev);
 int rs690_mc_init(struct radeon_device *rdev);
@@ -325,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs690_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rs690_asic = {
-       .init = &rs690_init,
+       .init = &rs600_init,
        .errata = &rs690_errata,
        .vram_info = &rs690_vram_info,
        .gpu_reset = &r300_gpu_reset,
index 7e8ce983a9089e1831d311593ce59abfdbbf61c2..02fd11aad6a28bed2f30eb111ec0a187d114cb63 100644 (file)
@@ -409,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
                ((reg) & RS600_MC_ADDR_MASK));
        WREG32(RS600_MC_DATA, v);
 }
+
+static const unsigned rs600_reg_safe_bm[219] = {
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+       0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+       0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
+       0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
+       0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+       0x00000000, 0x0000C100, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+};
+
+int rs600_init(struct radeon_device *rdev)
+{
+       rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+       rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+       return 0;
+}
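
Note: rs600 now carries its own register safe bitmap instead of borrowing r300's (and rs690 switches to it below). Each bit marks whether userspace command streams may touch the corresponding register slot. A generic sketch of testing membership in such a bitmap (the slot-to-register mapping is an assumption here; the real checker in r300_packet0_check() uses the driver's own encoding):

        #include <linux/types.h>

        /* 32 register slots per word of the bitmap */
        static bool reg_slot_is_safe(const unsigned *safe_bm, unsigned slot)
        {
                return safe_bm[slot / 32] & (1u << (slot % 32));
        }
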
index bc6b7c5339bc3d6a0bdc2c94cfa3d89510dcd5a8..879882533e45a121e47d7eb8340a76b508d5330a 100644 (file)
@@ -653,67 +653,3 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
        WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
 }
 
-static const unsigned rs690_reg_safe_bm[219] = {
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0x17FF1FFF,0xFFFFFFFC,0xFFFFFFFF,0xFF30FFBF,
-       0xFFFFFFF8,0xC3E6FFFF,0xFFFFF6DF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFF03F,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFEFCE,0xF00EBFFF,0x007C0000,
-       0xF0000078,0xFF000009,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFF7FF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFC78,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,
-       0x38FF8F50,0xFFF88082,0xF000000C,0xFAE009FF,
-       0x0000FFFF,0xFFFFFFFF,0xFFFFFFFF,0x00000000,
-       0x00000000,0x0000C100,0x00000000,0x00000000,
-       0x00000000,0x00000000,0x00000000,0x00000000,
-       0x00000000,0xFFFF0000,0xFFFFFFFF,0xFF80FFFF,
-       0x00000000,0x00000000,0x00000000,0x00000000,
-       0x0003FC01,0xFFFFFFF8,0xFE800B19,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-       0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-};
-
-int rs690_init(struct radeon_device *rdev)
-{
-       rdev->config.r300.reg_safe_bm = rs690_reg_safe_bm;
-       rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs690_reg_safe_bm);
-       return 0;
-}
index 31a7f668ae5af2c130c2657a3a7b992f18d5a8c1..0566fb67e4607b4c50d335df74a5ef25d9d55f1a 100644 (file)
@@ -508,7 +508,7 @@ static const unsigned r500_reg_safe_bm[219] = {
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
+       0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
index 527908ff298c408c0f86fdf24363ea9c70d6e652..063b933d864a81f04acc0bbabdce2ec0297b7088 100644 (file)
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
        PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
        PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
        PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+       PCMCIA_DEVICE_PROD_ID12("CNF   ", "CD-ROM", 0x46d7db81, 0x66536591),
        PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
        PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
        PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
index 8f9509e1ebf76494217b7d48f6b7bdef307ac3b4..55d093a36ae48a29a09877176b1448e279555d00 100644 (file)
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
+               cm_id->device->iwcm->reject(cm_id, NULL, 0);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
index de922a04ca2dbd5ca4f8696410750735bdaee681..7522008fda86880f759128e694d9df0cda68c764 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
+int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+
+module_param_named(send_queue_size, mad_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
+module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+
 static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
 
 /* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
-
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);
 
 /* Forward declarations */
 static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
        unsigned long delay;
 
        if (list_empty(&mad_agent_priv->wait_list)) {
-               cancel_delayed_work(&mad_agent_priv->timed_work);
+               __cancel_delayed_work(&mad_agent_priv->timed_work);
        } else {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
                if (time_after(mad_agent_priv->timeout,
                               mad_send_wr->timeout)) {
                        mad_agent_priv->timeout = mad_send_wr->timeout;
-                       cancel_delayed_work(&mad_agent_priv->timed_work);
+                       __cancel_delayed_work(&mad_agent_priv->timed_work);
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 
        /* Reschedule a work item if we have a shorter timeout */
        if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-               cancel_delayed_work(&mad_agent_priv->timed_work);
+               __cancel_delayed_work(&mad_agent_priv->timed_work);
                queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
                                   &mad_agent_priv->timed_work, delay);
        }
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-       qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
-       qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
+       qp_init_attr.cap.max_send_wr = mad_sendq_size;
+       qp_init_attr.cap.max_recv_wr = mad_recvq_size;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                goto error;
        }
        /* Use minimum queue sizes unless the CQ is resized */
-       qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
-       qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
+       qp_info->send_queue.max_active = mad_sendq_size;
+       qp_info->recv_queue.max_active = mad_recvq_size;
        return 0;
 
 error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
-       cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
+       cq_size = (mad_sendq_size + mad_recvq_size) * 2;
        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
 {
        int ret;
 
-       spin_lock_init(&ib_mad_port_list_lock);
+       mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
+       mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
+
+       mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
+       mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
 
        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)
 
 module_init(ib_mad_init_module);
 module_exit(ib_mad_cleanup_module);
-
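
Note: two small idioms in this hunk are worth calling out: the port-list lock switches from a runtime spin_lock_init() to a static DEFINE_SPINLOCK(), and the new queue-size module parameters are clamped to [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] at module init. A compact sketch of the same shape, with placeholder names and limits:

        #include <linux/init.h>
        #include <linux/kernel.h>
        #include <linux/module.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(my_list_lock);   /* statically initialized, no spin_lock_init() needed */

        static int sendq_size = 128;
        module_param(sendq_size, int, 0444);
        MODULE_PARM_DESC(sendq_size, "Send queue depth");

        static int __init my_init(void)
        {
                /* keep user-supplied values inside a sane range */
                sendq_size = min(sendq_size, 8192);
                sendq_size = max(sendq_size, 64);
                return 0;
        }
        module_init(my_init);
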
index 05ce331733b069413c69d1ff7d9a2c00f4231cd2..9430ab4969c55505d0cbf59ddafeda741a8892a2 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
 /* QP and CQ parameters */
 #define IB_MAD_QP_SEND_SIZE    128
 #define IB_MAD_QP_RECV_SIZE    512
+#define IB_MAD_QP_MIN_SIZE     64
+#define IB_MAD_QP_MAX_SIZE     8192
 #define IB_MAD_SEND_REQ_MAX_SG 2
 #define IB_MAD_RECV_REQ_MAX_SG 1
 
index 107f170c57cdb12cc05786551b21d29b01ecc6de..8d82ba17135366317ba2d3e856c78c4cfb97829a 100644 (file)
@@ -106,6 +106,8 @@ struct mcast_group {
        struct ib_sa_query      *query;
        int                     query_id;
        u16                     pkey_index;
+       u8                      leave_state;
+       int                     retries;
 };
 
 struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
 
        rec = group->rec;
        rec.join_state = leave_state;
+       group->leave_state = leave_state;
 
        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
 {
        struct mcast_group *group = context;
 
-       mcast_work_handler(&group->work);
+       if (status && group->retries > 0 &&
+           !send_leave(group, group->leave_state))
+               group->retries--;
+       else
+               mcast_work_handler(&group->work);
 }
 
 static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
        if (!group)
                return NULL;
 
+       group->retries = 3;
        group->port = port;
        group->rec.mgid = *mgid;
        group->pkey_index = MCAST_INVALID_PKEY_INDEX;
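
Note: the new retries counter (initialized to 3 in acquire_group()) lets a failed leave be re-sent a bounded number of times before the normal work handler runs anyway. The same bounded-retry shape in isolation (hypothetical names; resend() is assumed to return 0 when the retry was queued):

        struct my_group {
                int retries;    /* e.g. start at 3 */
        };

        static int resend(struct my_group *group);
        static void finish(struct my_group *group);

        static void leave_done(int status, struct my_group *group)
        {
                if (status && group->retries > 0 && !resend(group))
                        group->retries--;       /* failed: try again, at most 'retries' more times */
                else
                        finish(group);          /* success, or retries exhausted */
        }
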
index 1865049e80f7548be1c814e6a9bfc73229f5195a..82543716d59ef74ce57029e03fd1e09aef6032a3 100644 (file)
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
        .remove = ib_sa_remove_one
 };
 
-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
 static DEFINE_IDR(query_idr);
 
-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
 static u32 tid;
 
 #define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
 {
        int ret;
 
-       spin_lock_init(&idr_lock);
-       spin_lock_init(&tid_lock);
-
        get_random_bytes(&tid, sizeof tid);
 
        ret = ib_register_client(&sa_client);
index 87236753bce9b7a93f9b41b30297e7d4c5a4d432..5855e4405d9bf2ca2b04a0fac5aebabed46c538b 100644 (file)
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
        hop_cnt = smp->hop_cnt;
 
        /* See section 14.2.2.2, Vol 1 IB spec */
+       /* C14-6 -- valid hop_cnt values are from 0 to 63 */
+       if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+               return IB_SMI_DISCARD;
+
        if (!ib_get_smp_direction(smp)) {
                /* C14-9:1 */
                if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
        hop_cnt = smp->hop_cnt;
 
        /* See section 14.2.2.2, Vol 1 IB spec */
+       /* C14-6 -- valid hop_cnt values are from 0 to 63 */
+       if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+               return IB_SMI_DISCARD;
+
        if (!ib_get_smp_direction(smp)) {
                /* C14-9:1 -- sender should have incremented hop_ptr */
                if (hop_cnt && hop_ptr == 0)
index eb36a81dd09bff2d544675de4c37c447614d3262..d3fff9e008a3e01f1f81795101738ae236b02dcb 100644 (file)
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
 DEFINE_IDR(ib_uverbs_qp_idr);
 DEFINE_IDR(ib_uverbs_srq_idr);
 
-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
 static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
 
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 
        if (hdr.command < 0                             ||
            hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
-           !uverbs_cmd_table[hdr.command]              ||
-           !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+           !uverbs_cmd_table[hdr.command])
                return -EINVAL;
 
        if (!file->ucontext &&
            hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
                return -EINVAL;
 
+       if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+               return -ENOSYS;
+
        return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
                                             hdr.in_words * 4, hdr.out_words * 4);
 }
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
 {
        int ret;
 
-       spin_lock_init(&map_lock);
-
        ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
                                     "infiniband_verbs");
        if (ret) {
index 0cfbb6d2f762b5699c4edef953ab5757196dae5c..8250740c94b09bce8f5c73c6928d895cd24a28c3 100644 (file)
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);
 
 static void c2_print_macaddr(struct net_device *netdev)
 {
-       pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
-               "IRQ %u\n", netdev->name,
-               netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-               netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-               netdev->irq);
+       pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
 }
 
 static void c2_set_rxbufsize(struct c2_port *c2_port)
index f1948fad85d7cd1586c009bd7e89d53f2c30375b..ad723bd8bf498090560c72bc78a1a06dc72e9730 100644 (file)
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
        /* Register pseudo network device */
        dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
        if (!dev->pseudo_netdev)
-               goto out3;
+               goto out;
 
        ret = register_netdev(dev->pseudo_netdev);
        if (ret)
-               goto out2;
+               goto out_free_netdev;
 
        pr_debug("%s:%u\n", __func__, __LINE__);
        strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
        dev->ibdev.post_recv = c2_post_receive;
 
        dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+       if (dev->ibdev.iwcm == NULL) {
+               ret = -ENOMEM;
+               goto out_unregister_netdev;
+       }
        dev->ibdev.iwcm->add_ref = c2_add_ref;
        dev->ibdev.iwcm->rem_ref = c2_rem_ref;
        dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)
 
        ret = ib_register_device(&dev->ibdev);
        if (ret)
-               goto out1;
+               goto out_free_iwcm;
 
        for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                               c2_dev_attributes[i]);
                if (ret)
-                       goto out0;
+                       goto out_unregister_ibdev;
        }
-       goto out3;
+       goto out;
 
-out0:
+out_unregister_ibdev:
        ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
+       kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
        unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
        free_netdev(dev->pseudo_netdev);
-out3:
+out:
        pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
        return ret;
 }
index 62f9cf2f94ec647756dcd5f2ae06c9f01a9eafac..72ed3396b721e36d528f3d6191034493447c33ae 100644 (file)
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
        wqe->qpcaps = attr->qpcaps;
        wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
        wqe->rqe_count = cpu_to_be16(attr->rqe_count);
-       wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
+       wqe->flags_rtr_type = cpu_to_be16(attr->flags |
+                                         V_RTR_TYPE(attr->rtr_type) |
+                                         V_CHAN(attr->chan));
        wqe->ord = cpu_to_be32(attr->ord);
        wqe->ird = cpu_to_be32(attr->ird);
        wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
 err2:
        cxio_hal_destroy_ctrl_qp(rdev_p);
 err1:
+       rdev_p->t3cdev_p->ulp = NULL;
        list_del(&rdev_p->entry);
        return err;
 }
index 32e3b1461d81d551f3b1aa2a602c9449d2597c8c..a197a5b7ac7fc74836aaf2ece28ff93c4271428d 100644 (file)
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
 #define V_RTR_TYPE(x)  ((x) << S_RTR_TYPE)
 #define G_RTR_TYPE(x)  ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
 
+#define S_CHAN         4
+#define M_CHAN         0x3
+#define V_CHAN(x)      ((x) << S_CHAN)
+#define G_CHAN(x)      ((((x) >> S_CHAN)) & M_CHAN)
+
 struct t3_rdma_init_attr {
        u32 tid;
        u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
        u16 flags;
        u16 rqe_count;
        u32 irs;
+       u32 chan;
 };
 
 struct t3_rdma_init_wr {
index 26fc0a4eaa749f91477a711be17710e98dc36ff1..b0ea0105ddf6c2caa01dda9716d7f8f38e21642b 100644 (file)
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
 
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);
 
 struct cxgb3_client t3c_client = {
        .name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
        .remove = close_rnic_dev,
        .handlers = t3c_handlers,
        .redirect = iwch_ep_redirect,
-       .err_handler = iwch_err_handler
+       .event_handler = iwch_event_handler
 };
 
 static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
 static void open_rnic_dev(struct t3cdev *tdev)
 {
        struct iwch_dev *rnicp;
-       static int vers_printed;
 
        PDBG("%s t3cdev %p\n", __func__,  tdev);
-       if (!vers_printed++)
-               printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+       printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
                       DRV_VERSION);
        rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
        if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
        mutex_unlock(&dev_mutex);
 }
 
-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 {
        struct cxio_rdev *rdev = tdev->ulp;
-       struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
+       struct iwch_dev *rnicp;
        struct ib_event event;
+       u32    portnum = port_id + 1;
 
-       if (status == OFFLOAD_STATUS_DOWN) {
+       if (!rdev)
+               return;
+       rnicp = rdev_to_iwch_dev(rdev);
+       switch (evt) {
+       case OFFLOAD_STATUS_DOWN: {
                rdev->flags = CXIO_ERROR_FATAL;
-
-               event.device = &rnicp->ibdev;
                event.event  = IB_EVENT_DEVICE_FATAL;
-               event.element.port_num = 0;
-               ib_dispatch_event(&event);
+               break;
+               }
+       case OFFLOAD_PORT_DOWN: {
+               event.event  = IB_EVENT_PORT_ERR;
+               break;
+               }
+       case OFFLOAD_PORT_UP: {
+               event.event  = IB_EVENT_PORT_ACTIVE;
+               break;
+               }
        }
 
+       event.device = &rnicp->ibdev;
+       event.element.port_num = portnum;
+       ib_dispatch_event(&event);
+
        return;
 }
 
index 52d7bb0c2a126cbf8a867cb522e9062c39015a5b..66b41351910ad390d5ec70dbb18c5ab99a196f45 100644 (file)
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
        ep = container_of(container_of(kref, struct iwch_ep_common, kref),
                          struct iwch_ep, com);
        PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
-       if (ep->com.flags & RELEASE_RESOURCES) {
+       if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
                cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
                dst_release(ep->dst);
                l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
 static void release_ep_resources(struct iwch_ep *ep)
 {
        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
-       ep->com.flags |= RELEASE_RESOURCES;
+       set_bit(RELEASE_RESOURCES, &ep->com.flags);
        put_ep(&ep->com);
 }
 
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
        event.private_data_len = ep->plen;
        event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        event.provider_data = ep;
-       if (state_read(&ep->parent_ep->com) != DEAD)
+       if (state_read(&ep->parent_ep->com) != DEAD) {
+               get_ep(&ep->com);
                ep->parent_ep->com.cm_id->event_handler(
                                                ep->parent_ep->com.cm_id,
                                                &event);
+       }
        put_ep(&ep->parent_ep->com);
        ep->parent_ep = NULL;
 }
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         * We get 2 abort replies from the HW.  The first one must
         * be ignored except for scribbling that we need one more.
         */
-       if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
-               ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+       if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
                return CPL_RET_BUF_DONE;
        }
 
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
-                * rejects the CR.
+                * rejects the CR. Also wake up anyone waiting
+                * in rdma connection migration (see iwch_accept_cr()).
                 */
                __state_set(&ep->com, CLOSING);
-               get_ep(&ep->com);
+               ep->com.rpl_done = 1;
+               ep->com.rpl_err = -ECONNRESET;
+               PDBG("waking up ep %p\n", ep);
+               wake_up(&ep->com.waitq);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         * We get 2 peer aborts from the HW.  The first one must
         * be ignored except for scribbling that we need one more.
         */
-       if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
-               ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+       if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
                return CPL_RET_BUF_DONE;
        }
 
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
-                * rejects the CR.
+                * rejects the CR. Also wake up anyone waiting
+                * in rdma connection migration (see iwch_accept_cr()).
                 */
-               get_ep(&ep->com);
+               ep->com.rpl_done = 1;
+               ep->com.rpl_err = -ECONNRESET;
+               PDBG("waking up ep %p\n", ep);
+               wake_up(&ep->com.waitq);
                break;
        case MORIBUND:
        case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
                err = send_mpa_reject(ep, pdata, pdata_len);
                err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
        }
+       put_ep(&ep->com);
        return 0;
 }
 
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-       if (state_read(&ep->com) == DEAD)
-               return -ECONNRESET;
+       if (state_read(&ep->com) == DEAD) {
+               err = -ECONNRESET;
+               goto err;
+       }
 
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
            (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
                abort_connection(ep, NULL, GFP_KERNEL);
-               return -EINVAL;
+               err = -EINVAL;
+               goto err;
        }
 
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = qp;
 
-       ep->com.rpl_done = 0;
-       ep->com.rpl_err = 0;
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;
 
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
-       get_ep(&ep->com);
-
        /* bind QP to EP and move to RTS */
        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        err = iwch_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
-               goto err;
+               goto err1;
 
        /* if needed, wait for wr_ack */
        if (iwch_rqes_posted(qp)) {
                wait_event(ep->com.waitq, ep->com.rpl_done);
                err = ep->com.rpl_err;
                if (err)
-                       goto err;
+                       goto err1;
        }
 
        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
-               goto err;
+               goto err1;
 
 
        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
        put_ep(&ep->com);
        return 0;
-err:
+err1:
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        cm_id->rem_ref(cm_id);
+err:
        put_ep(&ep->com);
        return err;
 }
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
                        ep->com.state = CLOSING;
                        start_ep_timer(ep);
                }
+               set_bit(CLOSE_SENT, &ep->com.flags);
                break;
        case CLOSING:
-               close = 1;
-               if (abrupt) {
-                       stop_ep_timer(ep);
-                       ep->com.state = ABORTING;
-               } else
-                       ep->com.state = MORIBUND;
+               if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+                       close = 1;
+                       if (abrupt) {
+                               stop_ep_timer(ep);
+                               ep->com.state = ABORTING;
+                       } else
+                               ep->com.state = MORIBUND;
+               }
                break;
        case MORIBUND:
        case ABORTING:
index 43c0aea7eadc8b0b52ac1b8b3df8965c5a20a059..b9efadfffb4f64b39e991230b25e676c9a23c777 100644 (file)
@@ -145,9 +145,10 @@ enum iwch_ep_state {
 };
 
 enum iwch_ep_flags {
-       PEER_ABORT_IN_PROGRESS  = (1 << 0),
-       ABORT_REQ_IN_PROGRESS   = (1 << 1),
-       RELEASE_RESOURCES       = (1 << 2),
+       PEER_ABORT_IN_PROGRESS  = 0,
+       ABORT_REQ_IN_PROGRESS   = 1,
+       RELEASE_RESOURCES       = 2,
+       CLOSE_SENT              = 3,
 };
 
 struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
        wait_queue_head_t waitq;
        int rpl_done;
        int rpl_err;
-       u32 flags;
+       unsigned long flags;
 };
 
 struct iwch_listen_ep {
index ec49a5cbdebbc19705968069b58d2ab9f98d7d8e..e1ec65ebb016e4c7cfbfe81ca1e2095ba614ce08 100644 (file)
@@ -39,7 +39,7 @@
 #include "iwch.h"
 #include "iwch_provider.h"
 
-static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
+static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
 {
        u32 mmid;
 
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-       insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+       return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
 }
 
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                      struct iwch_mr *mhp, int shift)
 {
        u32 stag;
+       int ret;
 
        if (cxio_register_phys_mem(&rhp->rdev,
                                   &stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                return -ENOMEM;
 
-       iwch_finish_mem_reg(mhp, stag);
-
-       return 0;
+       ret = iwch_finish_mem_reg(mhp, stag);
+       if (ret)
+               cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+                      mhp->attr.pbl_addr);
+       return ret;
 }
 
 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                        int npages)
 {
        u32 stag;
+       int ret;
 
        /* We could support this... */
        if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                return -ENOMEM;
 
-       iwch_finish_mem_reg(mhp, stag);
+       ret = iwch_finish_mem_reg(mhp, stag);
+       if (ret)
+               cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+                      mhp->attr.pbl_addr);
 
-       return 0;
+       return ret;
 }
 
 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
index e2a63214008a90b1ddf137341bbbbc4eac2d16e4..6895523779d0d5ebb9d5eeb4f58e9d5ca42a1c0c 100644 (file)
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
        spin_lock_init(&chp->lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
-       insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+       if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+               cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
+               kfree(chp);
+               return ERR_PTR(-ENOMEM);
+       }
 
        if (ucontext) {
                struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
-       insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+       if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+               cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
+               kfree(mhp);
+               return ERR_PTR(-ENOMEM);
+       }
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
 }
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
        struct iwch_mr *mhp;
        u32 mmid;
        u32 stag = 0;
-       int ret;
+       int ret = 0;
 
        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
-               return ERR_PTR(-ENOMEM);
+               goto err;
 
        mhp->rhp = rhp;
        ret = iwch_alloc_pbl(mhp, pbl_depth);
-       if (ret) {
-               kfree(mhp);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto err1;
        mhp->attr.pbl_size = pbl_depth;
        ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
                                 mhp->attr.pbl_size, mhp->attr.pbl_addr);
-       if (ret) {
-               iwch_free_pbl(mhp);
-               kfree(mhp);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto err2;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = TPT_NON_SHARED_MR;
        mhp->attr.stag = stag;
        mhp->attr.state = 1;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-       insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+       if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+               goto err3;
+
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmr);
+err3:
+       cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+                      mhp->attr.pbl_addr);
+err2:
+       iwch_free_pbl(mhp);
+err1:
+       kfree(mhp);
+err:
+       return ERR_PTR(ret);
 }
 
 static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
        spin_lock_init(&qhp->lock);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
-       insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
+
+       if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+               cxio_destroy_qp(&rhp->rdev, &qhp->wq,
+                       ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+               kfree(qhp);
+               return ERR_PTR(-ENOMEM);
+       }
 
        if (udata) {
 
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
 bail2:
        ib_unregister_device(&dev->ibdev);
 bail1:
+       kfree(dev->ibdev.iwcm);
        return ret;
 }
 
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
                device_remove_file(&dev->ibdev.dev,
                                   iwch_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
+       kfree(dev->ibdev.iwcm);
        return;
 }
index 27bbdc8e773ae934e03b8d421a08102c7b945d63..6e86534719414ff5e5e6c859dd4fd3cbf995cfa5 100644 (file)
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
        init_attr.rqe_count = iwch_rqes_posted(qhp);
        init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
+       init_attr.chan = qhp->ep->l2t->smt_idx;
        if (peer2peer) {
                init_attr.rtr_type = RTR_READ;
                if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
index fab18a2c74a8d55f00e5d2772ef1b448e01d6e59..5b635aa5947e27822a5386cdc447f8b4a2693db3 100644 (file)
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level      = 0;
 static int ehca_poll_all_eqs  = 1;
 
 int ehca_debug_level   = 0;
-int ehca_nr_ports      = 2;
+int ehca_nr_ports      = -1;
 int ehca_use_hp_mr     = 0;
 int ehca_port_act_time = 30;
 int ehca_static_rate   = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
                 "Hardware level (0: autosensing (default), "
                 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
-                "number of connected ports (-1: autodetect, 1: port one only, "
-                "2: two ports (default)");
+                "number of connected ports (-1: autodetect (default), "
+                "1: port one only, 2: two ports)");
 MODULE_PARM_DESC(use_hp_mr,
                 "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
index 5a3d96f84c79c780b078b03b363bb847ef41006e..8fd88cd828fd657ab11ef33e551e0f1060669f80 100644 (file)
@@ -786,7 +786,11 @@ repoll:
        wc->slid = cqe->rlid;
        wc->dlid_path_bits = cqe->dlid;
        wc->src_qp = cqe->remote_qp_number;
-       wc->wc_flags = cqe->w_completion_flags;
+       /*
+        * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+        * SW defines those in bits 1 and 0, so we can just shift and mask.
+        */
+       wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
        wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
        wc->sl = cqe->service_level;
 
index c568b28f4e207416762c18c69d86f27818e9d530..8c1213f8916a19bfdccab6f3dcaf08f14065c509 100644 (file)
@@ -125,14 +125,30 @@ struct ib_perf {
        u8 data[192];
 } __attribute__ ((packed));
 
+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+       u32 tc:8;
+       u32 sl:4;
+       u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+       u32 ver:4;
+       u32 tc:8;
+       u32 fl:20;
+} __attribute__ ((packed));
 
 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+                            struct ib_wc *in_wc, struct ib_grh *in_grh,
                             struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
        struct ib_perf *in_perf = (struct ib_perf *)in_mad;
        struct ib_perf *out_perf = (struct ib_perf *)out_mad;
        struct ib_class_port_info *poi =
                (struct ib_class_port_info *)out_perf->data;
+       struct tcslfl *tcslfl =
+               (struct tcslfl *)&poi->redirect_tcslfl;
        struct ehca_shca *shca =
                container_of(ibdev, struct ehca_shca, ib_device);
        struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
                poi->base_version = 1;
                poi->class_version = 1;
                poi->resp_time_value = 18;
-               poi->redirect_lid = sport->saved_attr.lid;
-               poi->redirect_qp = sport->pma_qp_nr;
+
+               /* copy local routing information from WC where applicable */
+               tcslfl->sl         = in_wc->sl;
+               poi->redirect_lid  =
+                       sport->saved_attr.lid | in_wc->dlid_path_bits;
+               poi->redirect_qp   = sport->pma_qp_nr;
                poi->redirect_qkey = IB_QP1_QKEY;
-               poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+               ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+                               &poi->redirect_pkey);
+
+               /* if request was globally routed, copy route info */
+               if (in_grh) {
+                       struct vertcfl *vertcfl =
+                               (struct vertcfl *)&in_grh->version_tclass_flow;
+                       memcpy(poi->redirect_gid, in_grh->dgid.raw,
+                              sizeof(poi->redirect_gid));
+                       tcslfl->tc        = vertcfl->tc;
+                       tcslfl->fl        = vertcfl->fl;
+               } else
+                       /* else only fill in default GID */
+                       ehca_query_gid(ibdev, port_num, 0,
+                                      (union ib_gid *)&poi->redirect_gid);
 
                ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
                         sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:
 
 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                     struct ib_wc *in_wc, struct ib_grh *in_grh,
-                    struct ib_mad *in_mad,
-                    struct ib_mad *out_mad)
+                    struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
        int ret;
 
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                return IB_MAD_RESULT_SUCCESS;
 
        ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-       ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+       ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+                               in_mad, out_mad);
 
        return ret;
 }
index 23173982b32c1c636fdc2bbc398ad1168e3ff10d..38a287006612c055b3a1faa32800099041257c35 100644 (file)
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
                pd->port_cnt = 1;
                port_fp(fp) = pd;
                pd->port_pid = get_pid(task_pid(current));
-               strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+               strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
                ipath_stats.sps_ports++;
                ret = 0;
        } else
index 16a702d460184f8f66e2434b720aa9842a145701..ceb98ee7866646d87ecdc6b8351622a11441facd 100644 (file)
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;
 
-       strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+       memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
 
        return reply(smp);
 }
index ae3d7590346e850c0f5579bea15390c91e8b4764..3cb3f47a10b85753cded5f80f019517bf81e3635 100644 (file)
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;
 
+       if (!dev->ib_active)
+               return ERR_PTR(-EAGAIN);
+
        resp.qp_tab_size      = dev->dev->caps.num_qps;
        resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
        resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
-       static int mlx4_ib_version_printed;
        struct mlx4_ib_dev *ibdev;
        int num_ports = 0;
        int i;
 
-       if (!mlx4_ib_version_printed) {
-               printk(KERN_INFO "%s", mlx4_ib_version);
-               ++mlx4_ib_version_printed;
-       }
+       printk_once(KERN_INFO "%s", mlx4_ib_version);
 
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        goto err_reg;
        }
 
+       ibdev->ib_active = true;
+
        return ibdev;
 
 err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                break;
 
        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+               ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                break;
 
index 8a7dd6795fa0a2117f3f21311c9a1a4eccdf4c9c..3486d7675e56dfece310b090abf0eec93d140ec6 100644 (file)
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
        spinlock_t              sm_lock;
 
        struct mutex            cap_mask_mutex;
+       bool                    ib_active;
 };
 
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
index c4a02648c8afe78ce6553e95fcc0742ed499274b..219b10397b4d0fd64f19767adbdb3e24352f1476 100644 (file)
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
 }
 
 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
-       else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+               __acquire(&recv_cq->lock);
+       } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
 }
 
 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+       __releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
+               __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
-       else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+       else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
index 65ad359fdf164e506ec9a5726ba12c5a5ee882f2..056b2a4c69700f9fdb7cb6640213804e5f850aec 100644 (file)
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
        event.device = &dev->ib_dev;
        event.event  = IB_EVENT_DEVICE_FATAL;
        event.element.port_num = 0;
+       dev->active = false;
 
        ib_dispatch_event(&event);
 
index 75671f75cac482bf67ce0a6c3692634cce7f43b5..155bc66395beaeaf5aa1476b884cb12c527715a5 100644 (file)
@@ -34,8 +34,6 @@
 #ifndef MTHCA_CONFIG_REG_H
 #define MTHCA_CONFIG_REG_H
 
-#include <asm/page.h>
-
 #define MTHCA_HCR_BASE         0x80680
 #define MTHCA_HCR_SIZE         0x0001c
 #define MTHCA_ECR_BASE         0x80700
index 9ef611f6dd36d52f531198d0a61d7ec11dbe78fd..7e6a6d64ad4eb1bee96b0d2d244daf20898ab3b6 100644 (file)
@@ -357,6 +357,7 @@ struct mthca_dev {
        struct ib_ah         *sm_ah[MTHCA_MAX_PORTS];
        spinlock_t            sm_lock;
        u8                    rate[MTHCA_MAX_PORTS];
+       bool                  active;
 };
 
 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
index 90e4e450a12022d4186ca19f1c94c9e897e5c785..8c31fa36e95e7102a96ecf9ca6b31988b0f99640 100644 (file)
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
 
        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
-                       [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
-                       [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
-                       [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
+                       [MTHCA_EQ_COMP]  = DRV_NAME "-comp",
+                       [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
+                       [MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
                };
 
                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+                       snprintf(dev->eq_table.eq[i].irq_name,
+                                IB_DEVICE_NAME_MAX,
+                                "%s@pci:%s", eq_name[i],
+                                pci_name(dev->pdev));
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
-                                         0, eq_name[i], dev->eq_table.eq + i);
+                                         0, dev->eq_table.eq[i].irq_name,
+                                         dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
+               snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
+                        DRV_NAME "@pci:%s", pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
-                                 IRQF_SHARED, DRV_NAME, dev);
+                                 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
index 13da9f1d24c0a2bf9d715f32701d0cb02406cba2..b01b28987874e9145d241310347e4691695c2716 100644 (file)
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
        pci_set_drvdata(pdev, mdev);
        mdev->hca_type = hca_type;
 
+       mdev->active = true;
+
        return 0;
 
 err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
 static int __devinit mthca_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
 {
-       static int mthca_version_printed = 0;
        int ret;
 
        mutex_lock(&mthca_device_mutex);
 
-       if (!mthca_version_printed) {
-               printk(KERN_INFO "%s", mthca_version);
-               ++mthca_version_printed;
-       }
+       printk_once(KERN_INFO "%s", mthca_version);
 
        if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
                printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
index 87ad889e367b2b6b39cfe010ec1ec28f1dd488ac..bcf7a401482015f3b5c09afdc4177bab3d10577f 100644 (file)
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
        struct mthca_ucontext           *context;
        int                              err;
 
+       if (!(to_mdev(ibdev)->active))
+               return ERR_PTR(-EAGAIN);
+
        memset(&uresp, 0, sizeof uresp);
 
        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
index c621f8794b8820d11e6293d62c2ce35d11e8e7ee..90f4c4d2e98359f5b808643b32e7bb6e8a959201 100644 (file)
@@ -113,6 +113,7 @@ struct mthca_eq {
        int                    nent;
        struct mthca_buf_list *page_list;
        struct mthca_mr        mr;
+       char                   irq_name[IB_DEVICE_NAME_MAX];
 };
 
 struct mthca_av;
index f5081bfde6db19641ce62cb3638139caebb6f880..c10576fa60c112931650ab8c322b32dab5f478f9 100644 (file)
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 }
 
 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
-       else if (send_cq->cqn < recv_cq->cqn) {
+               __acquire(&recv_cq->lock);
+       } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
 }
 
 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+       __releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
+               __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
-       else if (send_cq->cqn < recv_cq->cqn) {
+       else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
index acb6817f6060615a4db753e3b4dde6c7f3dd31bb..2a13a163d33780ce9d646aba280a0ceeb76ff9ca 100644 (file)
@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
index bf1720f7f35fe6e07d80321bff8e1de42a834256..bcc6abc4faffafb9fba5b3076644b568434ecc98 100644 (file)
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
 void nes_cm_disconn_worker(void *);
 
 /* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
 int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 struct nes_ib_device *nes_init_ofa_device(struct net_device *);
 void nes_destroy_ofa_device(struct nes_ib_device *);
index 114b802771ada144c5a4df3fc25407e2acb34156..73473db1986361f2a390a30b09c942f74e62ca96 100644 (file)
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
  */
 int nes_cm_disconn(struct nes_qp *nesqp)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&nesqp->lock, flags);
-       if (nesqp->disconn_pending == 0) {
-               nesqp->disconn_pending++;
-               spin_unlock_irqrestore(&nesqp->lock, flags);
-               /* init our disconnect work element, to */
-               INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
+       struct disconn_work *work;
 
-               queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
-       } else
-               spin_unlock_irqrestore(&nesqp->lock, flags);
+       work = kzalloc(sizeof *work, GFP_ATOMIC);
+       if (!work)
+               return -ENOMEM; /* Timer will clean up */
 
+       nes_add_ref(&nesqp->ibqp);
+       work->nesqp = nesqp;
+       INIT_WORK(&work->work, nes_disconnect_worker);
+       queue_work(g_cm_core->disconn_wq, &work->work);
        return 0;
 }
 
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
  */
 static void nes_disconnect_worker(struct work_struct *work)
 {
-       struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+       struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+       struct nes_qp *nesqp = dwork->nesqp;
 
+       kfree(dwork);
        nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
                        nesqp->last_aeq, nesqp->hwqp.qp_id);
        nes_cm_disconn_true(nesqp);
+       nes_rem_ref(&nesqp->ibqp);
 }
 
 
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
        u16 last_ae;
        u8 original_hw_tcp_state;
        u8 original_ibqp_state;
-       u8 issued_disconnect_reset = 0;
+       enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+       int issue_disconn = 0;
+       int issue_close = 0;
+       int issue_flush = 0;
+       u32 flush_q = NES_CQP_FLUSH_RQ;
+       struct ib_event ibevent;
 
        if (!nesqp) {
                nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
        original_ibqp_state   = nesqp->ibqp_state;
        last_ae = nesqp->last_aeq;
 
+       if (nesqp->term_flags) {
+               issue_disconn = 1;
+               issue_close = 1;
+               nesqp->cm_id = NULL;
+               if (nesqp->flush_issued == 0) {
+                       nesqp->flush_issued = 1;
+                       issue_flush = 1;
+               }
+       } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+                       ((original_ibqp_state == IB_QPS_RTS) &&
+                       (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               issue_disconn = 1;
+               if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
+                       disconn_status = IW_CM_EVENT_STATUS_RESET;
+       }
+
+       if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+                (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+                (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+                (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               issue_close = 1;
+               nesqp->cm_id = NULL;
+               if (nesqp->flush_issued == 0) {
+                       nesqp->flush_issued = 1;
+                       issue_flush = 1;
+               }
+       }
+
+       spin_unlock_irqrestore(&nesqp->lock, flags);
 
-       nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+       if ((issue_flush) && (nesqp->destroyed == 0)) {
+               /* Flush the queue(s) */
+               if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
+                       flush_q |= NES_CQP_FLUSH_SQ;
+               flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
 
-       if ((nesqp->cm_id) && (cm_id->event_handler)) {
-               if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
-                               ((original_ibqp_state == IB_QPS_RTS) &&
-                               (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               if (nesqp->term_flags) {
+                       ibevent.device = nesqp->ibqp.device;
+                       ibevent.event = nesqp->terminate_eventtype;
+                       ibevent.element.qp = &nesqp->ibqp;
+                       nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+               }
+       }
+
+       if ((cm_id) && (cm_id->event_handler)) {
+               if (issue_disconn) {
                        atomic_inc(&cm_disconnects);
                        cm_event.event = IW_CM_EVENT_DISCONNECT;
-                       if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
-                               cm_event.status = IW_CM_EVENT_STATUS_RESET;
-                               nes_debug(NES_DBG_CM, "Generating a CM "
-                                       "Disconnect Event (status reset) for "
-                                       "QP%u, cm_id = %p. \n",
-                                       nesqp->hwqp.qp_id, cm_id);
-                       } else
-                               cm_event.status = IW_CM_EVENT_STATUS_OK;
-
+                       cm_event.status = disconn_status;
                        cm_event.local_addr = cm_id->local_addr;
                        cm_event.remote_addr = cm_id->remote_addr;
                        cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                                nesqp->hwqp.sq_tail, cm_id,
                                atomic_read(&nesqp->refcount));
 
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
                        ret = cm_id->event_handler(cm_id, &cm_event);
                        if (ret)
                                nes_debug(NES_DBG_CM, "OFA CM event_handler "
                                        "returned, ret=%d\n", ret);
-                       spin_lock_irqsave(&nesqp->lock, flags);
                }
 
-               nesqp->disconn_pending = 0;
-               /* There might have been another AE while the lock was released */
-               original_hw_tcp_state = nesqp->hw_tcp_state;
-               original_ibqp_state   = nesqp->ibqp_state;
-               last_ae = nesqp->last_aeq;
-
-               if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
-                               ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
-                                (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
-                                (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
-                                (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               if (issue_close) {
                        atomic_inc(&cm_closes);
-                       nesqp->cm_id = NULL;
-                       nesqp->in_disconnect = 0;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
                        nes_disconnect(nesqp, 1);
 
                        cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                        }
 
                        cm_id->rem_ref(cm_id);
-
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       if (nesqp->flush_issued == 0) {
-                               nesqp->flush_issued = 1;
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               flush_wqes(nesvnic->nesdev, nesqp,
-                                       NES_CQP_FLUSH_RQ, 1);
-                       } else
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-               } else {
-                       cm_id = nesqp->cm_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       /* check to see if the inbound reset beat the outbound reset */
-                       if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
-                               nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
-                                       "due to inbound reset beating the "
-                                       "outbound reset.\n", nesqp->hwqp.qp_id);
-                       }
                }
-       } else {
-               nesqp->disconn_pending = 0;
-               spin_unlock_irqrestore(&nesqp->lock, flags);
        }
 
        return 0;
index 8b7e7c0e496ecc7c2055c9845db3a0e746e8e3b9..90e8e4d8a5cef8522039c252f300530da6a15957 100644 (file)
@@ -410,8 +410,6 @@ struct nes_cm_ops {
 int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
                enum nes_timer_type, int, int);
 
-int nes_cm_disconn(struct nes_qp *);
-
 int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
 int nes_reject(struct iw_cm_id *, const void *, u8);
 int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
index 4a84d02ece0637fcbd3da54aed4131e6fbb45c4b..63a1a8e1e8a3d2f28631d7674188657deb2bf1f4 100644 (file)
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 static void process_critical_error(struct nes_device *nesdev);
 static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
 static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+static void nes_terminate_timeout(unsigned long context);
+static void nes_terminate_start_timer(struct nes_qp *nesqp);
 
 #ifdef CONFIG_INFINIBAND_NES_DEBUG
 static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 }
 
 
+static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
+{
+       u16 pkt_len;
+
+       if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
+               /* skip over ethernet header */
+               pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
+               pkt += ETH_HLEN;
+
+               /* Skip over IP and TCP headers */
+               pkt += 4 * (pkt[0] & 0x0f);
+               pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+       }
+       return pkt;
+}
+
+/* Determine if incoming error pkt is rdma layer */
+static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
+{
+       u8 *pkt;
+       u16 *mpa;
+       u32 opcode = 0xffffffff;
+
+       if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+               pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+               mpa = (u16 *)locate_mpa(pkt, aeq_info);
+               opcode = be16_to_cpu(mpa[1]) & 0xf;
+       }
+
+       return opcode;
+}
+
+/* Build iWARP terminate header */
+static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
+{
+       u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+       u16 ddp_seg_len;
+       int copy_len = 0;
+       u8 is_tagged = 0;
+       u8 flush_code = 0;
+       struct nes_terminate_hdr *termhdr;
+
+       termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
+       memset(termhdr, 0, 64);
+
+       if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+
+               /* Use data from offending packet to fill in ddp & rdma hdrs */
+               pkt = locate_mpa(pkt, aeq_info);
+               ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
+               if (ddp_seg_len) {
+                       copy_len = 2;
+                       termhdr->hdrct = DDP_LEN_FLAG;
+                       if (pkt[2] & 0x80) {
+                               is_tagged = 1;
+                               if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+                                       copy_len += TERM_DDP_LEN_TAGGED;
+                                       termhdr->hdrct |= DDP_HDR_FLAG;
+                               }
+                       } else {
+                               if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+                                       copy_len += TERM_DDP_LEN_UNTAGGED;
+                                       termhdr->hdrct |= DDP_HDR_FLAG;
+                               }
+
+                               if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+                                       if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+                                               copy_len += TERM_RDMA_LEN;
+                                               termhdr->hdrct |= RDMA_HDR_FLAG;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       switch (async_event_id) {
+       case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
+               switch (iwarp_opcode(nesqp, aeq_info)) {
+               case IWARP_OPCODE_WRITE:
+                       flush_code = IB_WC_LOC_PROT_ERR;
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_INV_STAG;
+                       break;
+               default:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_INV_STAG;
+               }
+               break;
+       case NES_AEQE_AEID_AMP_INVALID_STAG:
+               flush_code = IB_WC_REM_ACCESS_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+               termhdr->error_code = RDMAP_INV_STAG;
+               break;
+       case NES_AEQE_AEID_AMP_BAD_QP:
+               flush_code = IB_WC_LOC_QP_OP_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_QN;
+               break;
+       case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+       case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
+               switch (iwarp_opcode(nesqp, aeq_info)) {
+               case IWARP_OPCODE_SEND_INV:
+               case IWARP_OPCODE_SEND_SE_INV:
+                       flush_code = IB_WC_REM_OP_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+                       termhdr->error_code = RDMAP_CANT_INV_STAG;
+                       break;
+               default:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_INV_STAG;
+               }
+               break;
+       case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+               if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
+                       flush_code = IB_WC_LOC_PROT_ERR;
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_BOUNDS;
+               } else {
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_INV_BOUNDS;
+               }
+               break;
+       case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+       case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+       case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+               flush_code = IB_WC_REM_ACCESS_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+               termhdr->error_code = RDMAP_ACCESS;
+               break;
+       case NES_AEQE_AEID_AMP_TO_WRAP:
+               flush_code = IB_WC_REM_ACCESS_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+               termhdr->error_code = RDMAP_TO_WRAP;
+               break;
+       case NES_AEQE_AEID_AMP_BAD_PD:
+               switch (iwarp_opcode(nesqp, aeq_info)) {
+               case IWARP_OPCODE_WRITE:
+                       flush_code = IB_WC_LOC_PROT_ERR;
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
+                       break;
+               case IWARP_OPCODE_SEND_INV:
+               case IWARP_OPCODE_SEND_SE_INV:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_CANT_INV_STAG;
+                       break;
+               default:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_UNASSOC_STAG;
+               }
+               break;
+       case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+               flush_code = IB_WC_LOC_LEN_ERR;
+               termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+               termhdr->error_code = MPA_MARKER;
+               break;
+       case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+               termhdr->error_code = MPA_CRC;
+               break;
+       case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+       case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+               flush_code = IB_WC_LOC_LEN_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+               termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+               break;
+       case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+       case NES_AEQE_AEID_DDP_NO_L_BIT:
+               flush_code = IB_WC_FATAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+               termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+               break;
+       case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+       case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+               flush_code = IB_WC_LOC_LEN_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+               flush_code = IB_WC_GENERAL_ERR;
+               if (is_tagged) {
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
+               } else {
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+                       termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
+               }
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_MO;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+               flush_code = IB_WC_REM_OP_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_QN;
+               break;
+       case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+               termhdr->error_code = RDMAP_INV_RDMAP_VER;
+               break;
+       case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+               flush_code = IB_WC_LOC_QP_OP_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+               termhdr->error_code = RDMAP_UNEXPECTED_OP;
+               break;
+       default:
+               flush_code = IB_WC_FATAL_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+               termhdr->error_code = RDMAP_UNSPECIFIED;
+               break;
+       }
+
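+       /* Copy the offending packet headers immediately after the terminate header */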
+       if (copy_len)
+               memcpy(termhdr + 1, pkt, copy_len);
+
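+       /* Record the flush code for locally detected errors (not inbound RDMA) */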
+       if (flush_code && !(aeq_info & NES_AEQE_INBOUND_RDMA)) {
+               if (aeq_info & NES_AEQE_SQ)
+                       nesqp->term_sq_flush_code = flush_code;
+               else
+                       nesqp->term_rq_flush_code = flush_code;
+       }
+
+       return sizeof(struct nes_terminate_hdr) + copy_len;
+}
+
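+/* Build a terminate message (when enabled) and move the QP to the terminate state */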
+static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
+                struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
+{
+       u64 context;
+       unsigned long flags;
+       u32 aeq_info;
+       u16 async_event_id;
+       u8 tcp_state;
+       u8 iwarp_state;
+       u32 termlen = 0;
+       u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
+                          NES_CQP_QP_TERM_DONT_SEND_FIN;
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+       if (nesqp->term_flags & NES_TERM_SENT)
+               return; /* Sanity check */
+
+       aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+       tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+       iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+       async_event_id = (u16)aeq_info;
+
+       context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
+               aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
+       if (WARN_ON(!context))
+               return;
+
+       nesqp = (struct nes_qp *)(unsigned long)context;
+       spin_lock_irqsave(&nesqp->lock, flags);
+       nesqp->hw_iwarp_state = iwarp_state;
+       nesqp->hw_tcp_state = tcp_state;
+       nesqp->last_aeq = async_event_id;
+       nesqp->terminate_eventtype = eventtype;
+       spin_unlock_irqrestore(&nesqp->lock, flags);
+
+       if (nesadapter->send_term_ok)
+               termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
+       else
+               mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
+
+       nes_terminate_start_timer(nesqp);
+       nesqp->term_flags |= NES_TERM_SENT;
+       nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+}
+
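+/* Move the QP to the terminate state sending only a FIN, no terminate message */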
+static void nes_terminate_send_fin(struct nes_device *nesdev,
+                         struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+       u32 aeq_info;
+       u16 async_event_id;
+       u8 tcp_state;
+       u8 iwarp_state;
+       unsigned long flags;
+
+       aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+       tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+       iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+       async_event_id = (u16)aeq_info;
+
+       spin_lock_irqsave(&nesqp->lock, flags);
+       nesqp->hw_iwarp_state = iwarp_state;
+       nesqp->hw_tcp_state = tcp_state;
+       nesqp->last_aeq = async_event_id;
+       spin_unlock_irqrestore(&nesqp->lock, flags);
+
+       /* Send the FIN only */
+       nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
+               NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
+}
+
+/* Cleanup after a terminate sent or received */
+static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
+{
+       u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+       unsigned long flags;
+       struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
+       struct nes_device *nesdev = nesvnic->nesdev;
+       u8 first_time = 0;
+
+       spin_lock_irqsave(&nesqp->lock, flags);
+       if (nesqp->hte_added) {
+               nesqp->hte_added = 0;
+               next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+       }
+
+       first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
+       nesqp->term_flags |= NES_TERM_DONE;
+       spin_unlock_irqrestore(&nesqp->lock, flags);
+
+       /* Make sure we go through this only once */
+       if (first_time) {
+               if (timeout_occurred == 0)
+                       del_timer(&nesqp->terminate_timer);
+               else
+                       next_iwarp_state |= NES_CQP_QP_RESET;
+
+               nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+               nes_cm_disconn(nesqp);
+       }
+}
+
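+/* Validate an inbound terminate; malformed frames trigger a terminate of our own */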
+static void nes_terminate_received(struct nes_device *nesdev,
+                               struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+       u32 aeq_info;
+       u8 *pkt;
+       u32 *mpa;
+       u8 ddp_ctl;
+       u8 rdma_ctl;
+       u16 aeq_id = 0;
+
+       aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+       if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+               /*
+                * Terminate is not a performance path so the silicon
+                * did not validate the frame - do it now.
+                */
+               pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+               mpa = (u32 *)locate_mpa(pkt, aeq_info);
+               ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
+               rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
+               if ((ddp_ctl & 0xc0) != 0x40)
+                       aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
+               else if ((ddp_ctl & 0x03) != 1)
+                       aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
+               else if (be32_to_cpu(mpa[2]) != 2)
+                       aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
+               else if (be32_to_cpu(mpa[3]) != 1)
+                       aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
+               else if (be32_to_cpu(mpa[4]) != 0)
+                       aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
+               else if ((rdma_ctl & 0xc0) != 0x40)
+                       aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+               if (aeq_id) {
+                       /* Bad terminate received - send back a terminate */
+                       aeq_info = (aeq_info & 0xffff0000) | aeq_id;
+                       aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
+                       nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+                       return;
+               }
+       }
+
+       nesqp->term_flags |= NES_TERM_RCVD;
+       nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
+       nes_terminate_start_timer(nesqp);
+       nes_terminate_send_fin(nesdev, nesqp, aeqe);
+}
+
+/* Timeout routine in case terminate fails to complete */
+static void nes_terminate_timeout(unsigned long context)
+{
+       struct nes_qp *nesqp = (struct nes_qp *)context;
+
+       nes_terminate_done(nesqp, 1);
+}
+
+/* Set a timer in case hw cannot complete the terminate sequence */
+static void nes_terminate_start_timer(struct nes_qp *nesqp)
+{
+       init_timer(&nesqp->terminate_timer);
+       nesqp->terminate_timer.function = nes_terminate_timeout;
+       nesqp->terminate_timer.expires = jiffies + HZ;
+       nesqp->terminate_timer.data = (unsigned long)nesqp;
+       add_timer(&nesqp->terminate_timer);
+}
+
 /**
  * nes_process_iwarp_aeqe
  */
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                   struct nes_hw_aeqe *aeqe)
 {
        u64 context;
-       u64 aeqe_context = 0;
        unsigned long flags;
        struct nes_qp *nesqp;
+       struct nes_hw_cq *hw_cq;
+       struct nes_cq *nescq;
        int resource_allocated;
-       /* struct iw_cm_id *cm_id; */
        struct nes_adapter *nesadapter = nesdev->nesadapter;
-       struct ib_event ibevent;
-       /* struct iw_cm_event cm_event; */
        u32 aeq_info;
        u32 next_iwarp_state = 0;
        u16 async_event_id;
        u8 tcp_state;
        u8 iwarp_state;
+       int must_disconn = 1;
+       int must_terminate = 0;
+       struct ib_event ibevent;
 
        nes_debug(NES_DBG_AEQ, "\n");
        aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
-       if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
+       if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
                context  = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
                context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
        } else {
-               aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
-               aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
                context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
                                                aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
                BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 
        switch (async_event_id) {
                case NES_AEQE_AEID_LLP_FIN_RECEIVED:
-                       nesqp = *((struct nes_qp **)&context);
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+
+                       if (nesqp->term_flags)
+                               return; /* Ignore it, wait for close complete */
+
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
+
                        if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
                                        (nesqp->ibqp_state != IB_QPS_RTS)) {
                                /* FIN Received but tcp state or IB state moved on,
                                                should expect a close complete */
                                return;
                        }
+
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       if (nesqp->term_flags) {
+                               nes_terminate_done(nesqp, 0);
+                               return;
+                       }
+
                case NES_AEQE_AEID_LLP_CONNECTION_RESET:
-               case NES_AEQE_AEID_TERMINATE_SENT:
-               case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
                case NES_AEQE_AEID_RESET_SENT:
-                       nesqp = *((struct nes_qp **)&context);
+                       nesqp = (struct nes_qp *)(unsigned long)context;
                        if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
                                tcp_state = NES_AEQE_TCP_STATE_CLOSED;
                        }
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                        if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
                                        (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
                                nesqp->hte_added = 0;
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
-                                               nesqp->hwqp.qp_id);
-                               nes_hw_modify_qp(nesdev, nesqp,
-                                               NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
-                               spin_lock_irqsave(&nesqp->lock, flags);
+                               next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
                        }
 
                        if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
                                                break;
                                        case NES_AEQE_IWARP_STATE_TERMINATE:
-                                               next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
-                                               nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
-                                               if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
-                                                       next_iwarp_state |= 0x02000000;
-                                                       nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-                                               }
+                                               must_disconn = 0; /* terminate path takes care of disconn */
+                                               if (nesqp->term_flags == 0)
+                                                       must_terminate = 1;
                                                break;
-                                       default:
-                                               next_iwarp_state = 0;
-                               }
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               if (next_iwarp_state) {
-                                       nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
-                                                       " also added another reference\n",
-                                                       nesqp->hwqp.qp_id, next_iwarp_state);
-                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
                                }
-                               nes_cm_disconn(nesqp);
                        } else {
                                if (async_event_id ==  NES_AEQE_AEID_LLP_FIN_RECEIVED) {
                                        /* FIN Received but ib state not RTS,
                                                        close complete will be on its way */
-                                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                                       return;
-                               }
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
-                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
-                                       nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-                                       nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
-                                                       " also added another reference\n",
-                                                       nesqp->hwqp.qp_id, next_iwarp_state);
-                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+                                       must_disconn = 0;
                                }
-                               nes_cm_disconn(nesqp);
                        }
-                       break;
-               case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
                        spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
-                                       " event on QP%u \n  Q2 Data:\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_FATAL;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
-                       if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
-                                       ((nesqp->ibqp_state == IB_QPS_RTS)&&
-                                       (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+
+                       if (must_terminate)
+                               nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+                       else if (must_disconn) {
+                               if (next_iwarp_state) {
+                                       nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
+                                                 nesqp->hwqp.qp_id, next_iwarp_state);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                               }
                                nes_cm_disconn(nesqp);
-                       } else {
-                               nesqp->in_disconnect = 0;
-                               wake_up(&nesqp->kick_waitq);
                        }
                        break;
-               case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
-                       nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-                       nesqp->last_aeq = async_event_id;
-                       if (nesqp->cm_id) {
-                               nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
-                                               " event on QP%u, remote IP = 0x%08X \n",
-                                               nesqp->hwqp.qp_id,
-                                               ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
-                       } else {
-                               nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
-                                               " event on QP%u \n",
-                                               nesqp->hwqp.qp_id);
-                       }
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
-                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_FATAL;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
+
+               case NES_AEQE_AEID_TERMINATE_SENT:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       nes_terminate_send_fin(nesdev, nesqp, aeqe);
                        break;
-               case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
-                       if (NES_AEQE_INBOUND_RDMA&aeq_info) {
-                               nesqp = nesadapter->qp_table[le32_to_cpu(
-                                               aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
-                       } else {
-                               /* TODO: get the actual WQE and mask off wqe index */
-                               context &= ~((u64)511);
-                               nesqp = *((struct nes_qp **)&context);
-                       }
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_ACCESS_ERR;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
+
+               case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       nes_terminate_received(nesdev, nesqp, aeqe);
                        break;
+
+               case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+               case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
                case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-