]> git.openfabrics.org - ~shefty/rdma-dev.git/commitdiff
Add rcu user eqs exception hooks for async page fault
authorLi Zhong <zhong@linux.vnet.ibm.com>
Tue, 4 Dec 2012 02:35:13 +0000 (10:35 +0800)
committerGleb Natapov <gleb@redhat.com>
Tue, 18 Dec 2012 13:15:41 +0000 (15:15 +0200)
This patch adds user eqs exception hooks for async page fault page not
present code path, to exit the user eqs and re-enter it as necessary.

Async page fault is different from other exceptions in that it may be
triggered from the idle process, so we still need rcu_irq_enter() and
rcu_irq_exit() to exit the cpu idle eqs when needed, to protect the code
that needs to use rcu.

As Frederic pointed out it would be safest and simplest to protect the
whole kvm_async_pf_task_wait(). Otherwise, "we need to check all the
code there deeply for potential RCU uses and ensure it will never be
extended later to use RCU."

However, we'd better re-enter the cpu idle eqs if we got the exception
in cpu idle eqs, by calling rcu_irq_exit() before native_safe_halt().

So the patch does what Frederic suggested for rcu_irq_*() API usage
here, except that I moved the rcu_irq_*() pair originally in
do_async_page_fault() into kvm_async_pf_task_wait().

That's because I think it's better for the rcu_irq_*() calls to be paired
within one function ( rcu_irq_exit() after rcu_irq_enter() ), especially here:
kvm_async_pf_task_wait() has other callers, which might cause
rcu_irq_exit() to be called without a matching rcu_irq_enter() before it,
which is illegal if the cpu happens to be in rcu idle state.

Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kernel/kvm.c

index 08b973f64032ba31f5e91a0558e2dc003904531a..9c2bd8bd4b4c1f3cbeb7965b04bb57c3239e41a2 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
 
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
 
+       rcu_irq_enter();
+
        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
+
+               rcu_irq_exit();
                return;
        }
 
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
                        /*
                         * We cannot reschedule. So halt.
                         */
+                       rcu_irq_exit();
                        native_safe_halt();
+                       rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);
 
+       rcu_irq_exit();
        return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
-               rcu_irq_enter();
+               exception_enter(regs);
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
-               rcu_irq_exit();
+               exception_exit(regs);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();