git.openfabrics.org - ~shefty/rdma-dev.git/commitdiff
Merge branch 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Jan 2012 17:57:11 +0000 (09:57 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Jan 2012 17:57:11 +0000 (09:57 -0800)
* 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (74 commits)
  KVM: PPC: Whitespace fix for kvm.h
  KVM: Fix whitespace in kvm_para.h
  KVM: PPC: annotate kvm_rma_init as __init
  KVM: x86 emulator: implement RDPMC (0F 33)
  KVM: x86 emulator: fix RDPMC privilege check
  KVM: Expose the architectural performance monitoring CPUID leaf
  KVM: VMX: Intercept RDPMC
  KVM: SVM: Intercept RDPMC
  KVM: Add generic RDPMC support
  KVM: Expose a version 2 architectural PMU to a guest
  KVM: Expose kvm_lapic_local_deliver()
  KVM: x86 emulator: Use opcode::execute for Group 9 instruction
  KVM: x86 emulator: Use opcode::execute for Group 4/5 instructions
  KVM: x86 emulator: Use opcode::execute for Group 1A instruction
  KVM: ensure that debugfs entries have been created
  KVM: drop bsp_vcpu pointer from kvm struct
  KVM: x86: Consolidate PIT legacy test
  KVM: x86: Do not rely on implicit inclusions
  KVM: Make KVM_INTEL depend on CPU_SUP_INTEL
  KVM: Use memdup_user instead of kmalloc/copy_from_user
  ...

37 files changed:
Documentation/feature-removal-schedule.txt
Documentation/kernel-parameters.txt
Documentation/virtual/kvm/api.txt
arch/ia64/kvm/kvm-ia64.c
arch/powerpc/include/asm/kvm.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/cpuid.c [new file with mode: 0644]
arch/x86/kvm/cpuid.h [new file with mode: 0644]
arch/x86/kvm/emulate.c
arch/x86/kvm/i8254.c
arch/x86/kvm/i8259.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu_audit.c
arch/x86/kvm/mmutrace.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/pmu.c [new file with mode: 0644]
arch/x86/kvm/svm.c
arch/x86/kvm/timer.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
include/linux/kvm_host.h
include/linux/kvm_para.h
kernel/jump_label.c
virt/kvm/coalesced_mmio.c
virt/kvm/ioapic.c
virt/kvm/iommu.c
virt/kvm/kvm_main.c

index 284b44259750bd570df63926aef019728f978043..5575759b84eedccead799d851499ec69c5da3d8d 100644 (file)
@@ -350,15 +350,6 @@ Who:       anybody or Florian Mickler <florian@mickler.org>
 
 ----------------------------
 
-What:  KVM paravirt mmu host support
-When:  January 2011
-Why:   The paravirt mmu host support is slower than non-paravirt mmu, both
-       on newer and older hardware.  It is already not exposed to the guest,
-       and kept only for live migration purposes.
-Who:   Avi Kivity <avi@redhat.com>
-
-----------------------------
-
 What:  iwlwifi 50XX module parameters
 When:  3.0
 Why:   The "..50" modules parameters were used to configure 5000 series and
index 7b2e5c5eefa60269e0311d2685bd28b10c1e22cc..e69a461a06c233573431f19ec9fdf95ee8cdcb8a 100644 (file)
@@ -1178,9 +1178,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
                        Default is 0 (don't ignore, but inject #GP)
 
-       kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
-                       Default is 1 (enabled)
-
        kvm.mmu_audit=  [KVM] This is a R/W parameter which allows audit
                        KVM MMU at runtime.
                        Default is 0 (off)
index e2a4b5287361d25c0800954cbc79eccea88291d3..e1d94bf4056e77ab6df7dbc6d9c72b35f183190a 100644 (file)
@@ -1466,6 +1466,31 @@ is supported; 2 if the processor requires all virtual machines to have
 an RMA, or 1 if the processor can use an RMA but doesn't require it,
 because it supports the Virtual RMA (VRMA) facility.
 
+4.64 KVM_NMI
+
+Capability: KVM_CAP_USER_NMI
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Queues an NMI on the thread's vcpu.  Note this is well defined only
+when KVM_CREATE_IRQCHIP has not been called, since this is an interface
+between the virtual cpu core and virtual local APIC.  After KVM_CREATE_IRQCHIP
+has been called, this interface is completely emulated within the kernel.
+
+To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
+following algorithm:
+
+  - pause the vcpu
+  - read the local APIC's state (KVM_GET_LAPIC)
+  - check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
+  - if so, issue KVM_NMI
+  - resume the vcpu
+
+Some guests configure the LINT1 NMI input to cause a panic, aiding in
+debugging.
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
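
For userspace, a minimal sketch of the LINT1 algorithm above. KVM_GET_LAPIC,
KVM_NMI, and struct kvm_lapic_state are the documented uAPI; the 0x360 LVT
LINT1 offset and its layout (delivery mode in bits 8-10, mask in bit 16) come
from the Intel SDM; inject_nmi_via_lint1() is a hypothetical helper and
assumes the vcpu thread has already been paused:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Hypothetical helper: queue an NMI if the guest routed LINT1 to NMI.
     * Caller must have paused the vcpu thread, per the algorithm above. */
    static int inject_nmi_via_lint1(int vcpu_fd)
    {
            struct kvm_lapic_state lapic;
            __u32 lint1;

            if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
                    return -1;
            memcpy(&lint1, &lapic.regs[0x360], sizeof(lint1)); /* LVT LINT1 */
            if (((lint1 >> 8) & 0x7) == 0x4 && !(lint1 & (1u << 16)))
                    return ioctl(vcpu_fd, KVM_NMI);  /* NMI mode, unmasked */
            return 0;        /* LINT1 not routed to NMI; nothing queued */
    }
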
index 43f4c92816ef9760f68849611cea83ea8d0799aa..405052002493b8de11884c109afe9fcaf008aba2 100644 (file)
@@ -774,13 +774,13 @@ struct kvm *kvm_arch_alloc_vm(void)
        return kvm;
 }
 
-struct kvm_io_range {
+struct kvm_ia64_io_range {
        unsigned long start;
        unsigned long size;
        unsigned long type;
 };
 
-static const struct kvm_io_range io_ranges[] = {
+static const struct kvm_ia64_io_range io_ranges[] = {
        {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
        {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
        {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
-       int i, j;
+       int j;
        unsigned long base_gfn;
 
        slots = kvm_memslots(kvm);
-       for (i = 0; i < slots->nmemslots; i++) {
-               memslot = &slots->memslots[i];
+       kvm_for_each_memslot(memslot, slots) {
                base_gfn = memslot->base_gfn;
-
                for (j = 0; j < memslot->npages; j++) {
                        if (memslot->rmap[j])
                                put_page((struct page *)memslot->rmap[j]);
@@ -1820,7 +1818,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;
 
-       memslot = &kvm->memslots->memslots[log->slot];
+       memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;
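
Two of the ia64 hunks above track a tree-wide change in this series:
open-coded scans of slots->memslots are replaced by the new
kvm_for_each_memslot() iterator and id_to_memslot() lookup. A simplified
sketch of what those helpers do, not the exact kernel definitions (the real
ones also cope with the slot array now being kept sorted by size):

    /* Visit every populated slot; stops at the first empty one because
     * the array is kept sorted with populated slots first. */
    #define kvm_for_each_memslot(memslot, slots)                        \
            for ((memslot) = &(slots)->memslots[0];                     \
                 (memslot) < (slots)->memslots + KVM_MEM_SLOTS_NUM &&   \
                 (memslot)->npages;                                     \
                 (memslot)++)

    /* Find the slot with a given user-visible id, wherever it now sits. */
    static inline struct kvm_memory_slot *
    id_to_memslot(struct kvm_memslots *slots, int id)
    {
            int i;

            for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
                    if (slots->memslots[i].id == id)
                            return &slots->memslots[i];
            return NULL;
    }
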
index 0ad432bc81d66259d82e4e94f691c46e4c765555..f7727d91ac6b91d26325fdbbae46cc9e7698cba2 100644 (file)
@@ -170,8 +170,8 @@ struct kvm_sregs {
                        } ppc64;
                        struct {
                                __u32 sr[16];
-                               __u64 ibat[8]; 
-                               __u64 dbat[8]; 
+                               __u64 ibat[8];
+                               __u64 dbat[8];
                        } ppc32;
                } s;
                struct {
index a459479995c6619fd83d6731ea7baaa1c016e990..e41ac6f7dcf15105aa4b784fce7d43d1599c4d64 100644 (file)
@@ -498,7 +498,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
-               memslot = &kvm->memslots->memslots[log->slot];
+               memslot = id_to_memslot(kvm->memslots, log->slot);
 
                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);
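
This hunk and the ia64 one above both sit in KVM_GET_DIRTY_LOG handlers. A
sketch of how userspace typically drives that ioctl; get_dirty_log() is a
hypothetical helper, and the bitmap sizing (one bit per page, rounded up to
64-bit words) matches what the API documentation requires:

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Fetch the dirty bitmap for memory slot 'slot' covering 'npages' pages;
     * caller frees the returned buffer. */
    static void *get_dirty_log(int vm_fd, int slot, size_t npages)
    {
            struct kvm_dirty_log log;
            size_t bytes = ((npages + 63) / 64) * 8;  /* one bit per page */
            void *bitmap = calloc(1, bytes);

            if (!bitmap)
                    return NULL;
            memset(&log, 0, sizeof(log));
            log.slot = slot;
            log.dirty_bitmap = bitmap;
            if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                    free(bitmap);
                    return NULL;
            }
            return bitmap;
    }
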
index 286f13d601cf589f1b3ca9e9259c4fd22bb78fc4..a795a13f4a70f9ddc212e8d125857ad50232248a 100644 (file)
@@ -86,7 +86,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
  * to allocate contiguous physical memory for the real memory
  * areas for guests.
  */
-void kvm_rma_init(void)
+void __init kvm_rma_init(void)
 {
        unsigned long i;
        unsigned long j, npages;
index f3444f700f3619eedcc8f3fa050586148e80ad70..17c5d4bdee5ed5124c7be91bf8d4ef73b39b7c2f 100644 (file)
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE   (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_BMI1       (9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_AVX2       (9*32+ 5) /* AVX2 instructions */
 #define X86_FEATURE_SMEP       (9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2       (9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS       (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
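
Capability word 9 is the kernel's name for CPUID.(EAX=7,ECX=0):EBX, so
X86_FEATURE_BMI1 at (9*32+ 3) is simply bit 3 of that EBX. A sketch of
reading the same bits from userspace; cpuid_count() here is a local
inline-asm helper, not a library call:

    static inline void cpuid_count(unsigned int leaf, unsigned int subleaf,
                                   unsigned int *a, unsigned int *b,
                                   unsigned int *c, unsigned int *d)
    {
            __asm__ volatile("cpuid"
                             : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                             : "0"(leaf), "2"(subleaf));
    }

    int cpu_has_avx2(void)
    {
            unsigned int eax, ebx, ecx, edx;

            cpuid_count(7, 0, &eax, &ebx, &ecx, &edx); /* leaf 7, subleaf 0 */
            return (ebx >> 5) & 1;                     /* (9*32+ 5) above */
    }
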
index a026507893e9b566f0578a3955472c4b1f524559..ab4092e3214ecea7d02e3827fe596ed662243214 100644 (file)
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+       int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
        void (*halt)(struct x86_emulate_ctxt *ctxt);
        void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
        int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
 #endif
 
 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
index b4973f4dab9832da8569ef17d264406ac5c668d7..52d6640a5ca1eec731418d408fc6e03c161ce3fe 100644 (file)
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/irq_work.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
+#include <linux/perf_event.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -31,6 +33,8 @@
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+
 #define KVM_MMIO_SIZE 16
 
 #define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
         * One bit set per slot which has memory
         * in this shadow page.
         */
-       DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+       DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
        int clear_spte_count;
 #endif
 
-       struct rcu_head rcu;
-};
+       int write_flooding_count;
 
-struct kvm_pv_mmu_op_buffer {
-       void *ptr;
-       unsigned len;
-       unsigned processed;
-       char buf[512] __aligned(sizeof(long));
+       struct rcu_head rcu;
 };
 
 struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
        u64 pdptrs[4]; /* pae */
 };
 
+enum pmc_type {
+       KVM_PMC_GP = 0,
+       KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+       enum pmc_type type;
+       u8 idx;
+       u64 counter;
+       u64 eventsel;
+       struct perf_event *perf_event;
+       struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+       unsigned nr_arch_gp_counters;
+       unsigned nr_arch_fixed_counters;
+       unsigned available_event_types;
+       u64 fixed_ctr_ctrl;
+       u64 global_ctrl;
+       u64 global_status;
+       u64 global_ovf_ctrl;
+       u64 counter_bitmask[2];
+       u64 global_ctrl_mask;
+       u8 version;
+       struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+       struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+       struct irq_work irq_work;
+       u64 reprogram_pmi;
+};
+
 struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
         */
        struct kvm_mmu *walk_mmu;
 
-       /* only needed in kvm_pv_mmu_op() path, but it's hot so
-        * put it here to avoid allocation */
-       struct kvm_pv_mmu_op_buffer mmu_op_buffer;
-
        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
-       gfn_t last_pt_write_gfn;
-       int   last_pt_write_count;
-       u64  *last_pte_updated;
-       gfn_t last_pte_gfn;
-
        struct fpu guest_fpu;
        u64 xcr0;
 
@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
        unsigned access;
        gfn_t mmio_gfn;
 
+       struct kvm_pmu pmu;
+
        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;
 
@@ -444,6 +467,9 @@ struct kvm_vcpu_arch {
 
        cpumask_var_t wbinvd_dirty_mask;
 
+       unsigned long last_retry_eip;
+       unsigned long last_retry_addr;
+
        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
-       atomic_t invlpg_counter;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+                              struct kvm_memory_slot *slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-                 gpa_t addr, unsigned long *ret);
 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE         (1 << 0)
 #define EMULTYPE_TRAP_UD           (1 << 1)
 #define EMULTYPE_SKIP              (1 << 2)
+#define EMULTYPE_RETRY             (1 << 3)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);
 
@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes,
-                      bool guest_initiated);
+                      const u8 *new, int bytes);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+       return gpa;
+}
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
 #endif /* _ASM_X86_KVM_HOST_H */
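
struct kvm_pmu above carries the host-side state for the version 2
architectural PMU this series exposes through CPUID leaf 0xA; the guest reads
the counters with RDPMC, which the new kvm_rdpmc()/kvm_pmu_read_pmc() paths
emulate. A minimal guest-side sketch (ring 0, or user mode with CR4.PCE set;
the fixed-counter encoding via ECX bit 30 is per the Intel SDM):

    #include <stdint.h>

    /* Read PMC 'idx'; pass idx | (1u << 30) for a fixed-function counter. */
    static inline uint64_t rdpmc64(uint32_t idx)
    {
            uint32_t lo, hi;

            __asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx));
            return ((uint64_t)hi << 32) | lo;
    }
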
index a9c2116001d692f08b4d756cc90e7e5cd97ff3e5..f0c6fd6f176b00c9c3fdd972104c1961319f5eb9 100644 (file)
@@ -39,8 +39,6 @@
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
 
-#define MMU_QUEUE_SIZE 1024
-
 static int kvmapf = 1;
 
 static int parse_no_kvmapf(char *arg)
@@ -60,21 +58,10 @@ static int parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-struct kvm_para_state {
-       u8 mmu_queue[MMU_QUEUE_SIZE];
-       int mmu_queue_len;
-};
-
-static DEFINE_PER_CPU(struct kvm_para_state, para_state);
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
 
-static struct kvm_para_state *kvm_para_state(void)
-{
-       return &per_cpu(para_state, raw_smp_processor_id());
-}
-
 /*
  * No need for any "IO delay" on KVM
  */
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
        }
 }
 
-static void kvm_mmu_op(void *buffer, unsigned len)
-{
-       int r;
-       unsigned long a1, a2;
-
-       do {
-               a1 = __pa(buffer);
-               a2 = 0;   /* on i386 __pa() always returns <4G */
-               r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
-               buffer += r;
-               len -= r;
-       } while (len);
-}
-
-static void mmu_queue_flush(struct kvm_para_state *state)
-{
-       if (state->mmu_queue_len) {
-               kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
-               state->mmu_queue_len = 0;
-       }
-}
-
-static void kvm_deferred_mmu_op(void *buffer, int len)
-{
-       struct kvm_para_state *state = kvm_para_state();
-
-       if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
-               kvm_mmu_op(buffer, len);
-               return;
-       }
-       if (state->mmu_queue_len + len > sizeof state->mmu_queue)
-               mmu_queue_flush(state);
-       memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
-       state->mmu_queue_len += len;
-}
-
-static void kvm_mmu_write(void *dest, u64 val)
-{
-       __u64 pte_phys;
-       struct kvm_mmu_op_write_pte wpte;
-
-#ifdef CONFIG_HIGHPTE
-       struct page *page;
-       unsigned long dst = (unsigned long) dest;
-
-       page = kmap_atomic_to_page(dest);
-       pte_phys = page_to_pfn(page);
-       pte_phys <<= PAGE_SHIFT;
-       pte_phys += (dst & ~(PAGE_MASK));
-#else
-       pte_phys = (unsigned long)__pa(dest);
-#endif
-       wpte.header.op = KVM_MMU_OP_WRITE_PTE;
-       wpte.pte_val = val;
-       wpte.pte_phys = pte_phys;
-
-       kvm_deferred_mmu_op(&wpte, sizeof wpte);
-}
-
-/*
- * We only need to hook operations that are MMU writes.  We hook these so that
- * we can use lazy MMU mode to batch these operations.  We could probably
- * improve the performance of the host code if we used some of the information
- * here to simplify processing of batched writes.
- */
-static void kvm_set_pte(pte_t *ptep, pte_t pte)
-{
-       kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                          pte_t *ptep, pte_t pte)
-{
-       kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
-       kvm_mmu_write(pmdp, pmd_val(pmd));
-}
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-       kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_pte_clear(struct mm_struct *mm,
-                         unsigned long addr, pte_t *ptep)
-{
-       kvm_mmu_write(ptep, 0);
-}
-
-static void kvm_pmd_clear(pmd_t *pmdp)
-{
-       kvm_mmu_write(pmdp, 0);
-}
-#endif
-
-static void kvm_set_pud(pud_t *pudp, pud_t pud)
-{
-       kvm_mmu_write(pudp, pud_val(pud));
-}
-
-#if PAGETABLE_LEVELS == 4
-static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
-{
-       kvm_mmu_write(pgdp, pgd_val(pgd));
-}
-#endif
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static void kvm_flush_tlb(void)
-{
-       struct kvm_mmu_op_flush_tlb ftlb = {
-               .header.op = KVM_MMU_OP_FLUSH_TLB,
-       };
-
-       kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
-}
-
-static void kvm_release_pt(unsigned long pfn)
-{
-       struct kvm_mmu_op_release_pt rpt = {
-               .header.op = KVM_MMU_OP_RELEASE_PT,
-               .pt_phys = (u64)pfn << PAGE_SHIFT,
-       };
-
-       kvm_mmu_op(&rpt, sizeof rpt);
-}
-
-static void kvm_enter_lazy_mmu(void)
-{
-       paravirt_enter_lazy_mmu();
-}
-
-static void kvm_leave_lazy_mmu(void)
-{
-       struct kvm_para_state *state = kvm_para_state();
-
-       mmu_queue_flush(state);
-       paravirt_leave_lazy_mmu();
-}
-
 static void __init paravirt_ops_setup(void)
 {
        pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;
 
-       if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
-               pv_mmu_ops.set_pte = kvm_set_pte;
-               pv_mmu_ops.set_pte_at = kvm_set_pte_at;
-               pv_mmu_ops.set_pmd = kvm_set_pmd;
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-               pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
-               pv_mmu_ops.pte_clear = kvm_pte_clear;
-               pv_mmu_ops.pmd_clear = kvm_pmd_clear;
-#endif
-               pv_mmu_ops.set_pud = kvm_set_pud;
-#if PAGETABLE_LEVELS == 4
-               pv_mmu_ops.set_pgd = kvm_set_pgd;
-#endif
-#endif
-               pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
-               pv_mmu_ops.release_pte = kvm_release_pt;
-               pv_mmu_ops.release_pmd = kvm_release_pt;
-               pv_mmu_ops.release_pud = kvm_release_pt;
-
-               pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
-               pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
-       }
 #ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
 #endif
index ff5790d8e990f0c382fa43c5ff2d3b5d7ed46c48..1a7fe868f375cb6bccb281599359b8c2dbad8073 100644 (file)
@@ -35,6 +35,7 @@ config KVM
        select KVM_MMIO
        select TASKSTATS
        select TASK_DELAY_ACCT
+       select PERF_EVENTS
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
@@ -52,6 +53,8 @@ config KVM
 config KVM_INTEL
        tristate "KVM for Intel processors support"
        depends on KVM
+       # for perf_guest_get_msrs():
+       depends on CPU_SUP_INTEL
        ---help---
          Provides support for KVM on Intel processors equipped with the VT
          extensions.
index f15501f431c8d972a2eb3cc8dd6fb8c5870e248f..4f579e8dcacf6747a7e3a34db765bf112233680f 100644 (file)
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API)       += $(addprefix ../../../virt/kvm/, iommu.o)
 kvm-$(CONFIG_KVM_ASYNC_PF)     += $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y                  += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-                          i8254.o timer.o
+                          i8254.o timer.o cpuid.o pmu.o
 kvm-intel-y            += vmx.o
 kvm-amd-y              += svm.o
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
new file mode 100644 (file)
index 0000000..89b02bf
--- /dev/null
@@ -0,0 +1,670 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ * cpuid support routines
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ * Copyright IBM Corporation, 2008
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <asm/user.h>
+#include <asm/xsave.h>
+#include "cpuid.h"
+#include "lapic.h"
+#include "mmu.h"
+#include "trace.h"
+
+void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       if (!best)
+               return;
+
+       /* Update OSXSAVE bit */
+       if (cpu_has_xsave && best->function == 0x1) {
+               best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+                       best->ecx |= bit(X86_FEATURE_OSXSAVE);
+       }
+
+       if (apic) {
+               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+                       apic->lapic_timer.timer_mode_mask = 3 << 17;
+               else
+                       apic->lapic_timer.timer_mode_mask = 1 << 17;
+       }
+
+       kvm_pmu_cpuid_update(vcpu);
+}
+
+static int is_efer_nx(void)
+{
+       unsigned long long efer = 0;
+
+       rdmsrl_safe(MSR_EFER, &efer);
+       return efer & EFER_NX;
+}
+
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct kvm_cpuid_entry2 *e, *entry;
+
+       entry = NULL;
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               e = &vcpu->arch.cpuid_entries[i];
+               if (e->function == 0x80000001) {
+                       entry = e;
+                       break;
+               }
+       }
+       if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
+               entry->edx &= ~(1 << 20);
+               printk(KERN_INFO "kvm: guest NX capability removed\n");
+       }
+}
+
+/* when an old userspace process fills a new kernel module */
+int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+                            struct kvm_cpuid *cpuid,
+                            struct kvm_cpuid_entry __user *entries)
+{
+       int r, i;
+       struct kvm_cpuid_entry *cpuid_entries;
+
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -ENOMEM;
+       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
+       if (!cpuid_entries)
+               goto out;
+       r = -EFAULT;
+       if (copy_from_user(cpuid_entries, entries,
+                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+               goto out_free;
+       for (i = 0; i < cpuid->nent; i++) {
+               vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
+               vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
+               vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
+               vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
+               vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
+               vcpu->arch.cpuid_entries[i].index = 0;
+               vcpu->arch.cpuid_entries[i].flags = 0;
+               vcpu->arch.cpuid_entries[i].padding[0] = 0;
+               vcpu->arch.cpuid_entries[i].padding[1] = 0;
+               vcpu->arch.cpuid_entries[i].padding[2] = 0;
+       }
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       cpuid_fix_nx_cap(vcpu);
+       r = 0;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+       kvm_update_cpuid(vcpu);
+
+out_free:
+       vfree(cpuid_entries);
+out:
+       return r;
+}
+
+int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+{
+       int r;
+
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -EFAULT;
+       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+       kvm_update_cpuid(vcpu);
+       return 0;
+
+out:
+       return r;
+}
+
+int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+{
+       int r;
+
+       r = -E2BIG;
+       if (cpuid->nent < vcpu->arch.cpuid_nent)
+               goto out;
+       r = -EFAULT;
+       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
+       return 0;
+
+out:
+       cpuid->nent = vcpu->arch.cpuid_nent;
+       return r;
+}
+
+static void cpuid_mask(u32 *word, int wordnum)
+{
+       *word &= boot_cpu_data.x86_capability[wordnum];
+}
+
+static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                          u32 index)
+{
+       entry->function = function;
+       entry->index = index;
+       cpuid_count(entry->function, entry->index,
+                   &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
+       entry->flags = 0;
+}
+
+static bool supported_xcr0_bit(unsigned bit)
+{
+       u64 mask = ((u64)1 << bit);
+
+       return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+}
+
+#define F(x) bit(X86_FEATURE_##x)
+
+static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                        u32 index, int *nent, int maxnent)
+{
+       int r;
+       unsigned f_nx = is_efer_nx() ? F(NX) : 0;
+#ifdef CONFIG_X86_64
+       unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+                               ? F(GBPAGES) : 0;
+       unsigned f_lm = F(LM);
+#else
+       unsigned f_gbpages = 0;
+       unsigned f_lm = 0;
+#endif
+       unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
+
+       /* cpuid 1.edx */
+       const u32 kvm_supported_word0_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
+               0 /* Reserved, DS, ACPI */ | F(MMX) |
+               F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+               0 /* HTT, TM, Reserved, PBE */;
+       /* cpuid 0x80000001.edx */
+       const u32 kvm_supported_word1_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* Reserved */ |
+               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
+               0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
+       /* cpuid 1.ecx */
+       const u32 kvm_supported_word4_x86_features =
+               F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
+               0 /* DS-CPL, VMX, SMX, EST */ |
+               0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
+               F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
+               0 /* Reserved, DCA */ | F(XMM4_1) |
+               F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
+               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+               F(F16C) | F(RDRAND);
+       /* cpuid 0x80000001.ecx */
+       const u32 kvm_supported_word6_x86_features =
+               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
+               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+
+       /* cpuid 0xC0000001.edx */
+       const u32 kvm_supported_word5_x86_features =
+               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+               F(PMM) | F(PMM_EN);
+
+       /* cpuid 7.0.ebx */
+       const u32 kvm_supported_word9_x86_features =
+               F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+
+       /* all calls to cpuid_count() should be made on the same cpu */
+       get_cpu();
+
+       r = -E2BIG;
+
+       if (*nent >= maxnent)
+               goto out;
+
+       do_cpuid_1_ent(entry, function, index);
+       ++*nent;
+
+       switch (function) {
+       case 0:
+               entry->eax = min(entry->eax, (u32)0xd);
+               break;
+       case 1:
+               entry->edx &= kvm_supported_word0_x86_features;
+               cpuid_mask(&entry->edx, 0);
+               entry->ecx &= kvm_supported_word4_x86_features;
+               cpuid_mask(&entry->ecx, 4);
+               /* we support x2apic emulation even if host does not support
+                * it since we emulate x2apic in software */
+               entry->ecx |= F(X2APIC);
+               break;
+       /* function 2 entries are STATEFUL. That is, repeated cpuid commands
+        * may return different values. This forces us to get_cpu() before
+        * issuing the first command, and also to emulate this annoying behavior
+        * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
+       case 2: {
+               int t, times = entry->eax & 0xff;
+
+               entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+               entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+               for (t = 1; t < times; ++t) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       do_cpuid_1_ent(&entry[t], function, 0);
+                       entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+                       ++*nent;
+               }
+               break;
+       }
+       /* function 4 has additional index. */
+       case 4: {
+               int i, cache_type;
+
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until cache_type is zero */
+               for (i = 1; ; ++i) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       cache_type = entry[i - 1].eax & 0x1f;
+                       if (!cache_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+               }
+               break;
+       }
+       case 7: {
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* Mask ebx against host capability word 9 */
+               if (index == 0) {
+                       entry->ebx &= kvm_supported_word9_x86_features;
+                       cpuid_mask(&entry->ebx, 9);
+               } else
+                       entry->ebx = 0;
+               entry->eax = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
+       }
+       case 9:
+               break;
+       case 0xa: { /* Architectural Performance Monitoring */
+               struct x86_pmu_capability cap;
+               union cpuid10_eax eax;
+               union cpuid10_edx edx;
+
+               perf_get_x86_pmu_capability(&cap);
+
+               /*
+                * Only support guest architectural pmu on a host
+                * with architectural pmu.
+                */
+               if (!cap.version)
+                       memset(&cap, 0, sizeof(cap));
+
+               eax.split.version_id = min(cap.version, 2);
+               eax.split.num_counters = cap.num_counters_gp;
+               eax.split.bit_width = cap.bit_width_gp;
+               eax.split.mask_length = cap.events_mask_len;
+
+               edx.split.num_counters_fixed = cap.num_counters_fixed;
+               edx.split.bit_width_fixed = cap.bit_width_fixed;
+               edx.split.reserved = 0;
+
+               entry->eax = eax.full;
+               entry->ebx = cap.events_mask;
+               entry->ecx = 0;
+               entry->edx = edx.full;
+               break;
+       }
+       /* function 0xb has additional index. */
+       case 0xb: {
+               int i, level_type;
+
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until level_type is zero */
+               for (i = 1; ; ++i) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       level_type = entry[i - 1].ecx & 0xff00;
+                       if (!level_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+               }
+               break;
+       }
+       case 0xd: {
+               int idx, i;
+
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               for (idx = 1, i = 1; idx < 64; ++idx) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       do_cpuid_1_ent(&entry[i], function, idx);
+                       if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
+                               continue;
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+                       ++i;
+               }
+               break;
+       }
+       case KVM_CPUID_SIGNATURE: {
+               char signature[12] = "KVMKVMKVM\0\0";
+               u32 *sigptr = (u32 *)signature;
+               entry->eax = 0;
+               entry->ebx = sigptr[0];
+               entry->ecx = sigptr[1];
+               entry->edx = sigptr[2];
+               break;
+       }
+       case KVM_CPUID_FEATURES:
+               entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
+                            (1 << KVM_FEATURE_NOP_IO_DELAY) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE2) |
+                            (1 << KVM_FEATURE_ASYNC_PF) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+               if (sched_info_on())
+                       entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
+               entry->ebx = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
+       case 0x80000000:
+               entry->eax = min(entry->eax, 0x8000001a);
+               break;
+       case 0x80000001:
+               entry->edx &= kvm_supported_word1_x86_features;
+               cpuid_mask(&entry->edx, 1);
+               entry->ecx &= kvm_supported_word6_x86_features;
+               cpuid_mask(&entry->ecx, 6);
+               break;
+       case 0x80000008: {
+               unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+               unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+               unsigned phys_as = entry->eax & 0xff;
+
+               if (!g_phys_as)
+                       g_phys_as = phys_as;
+               entry->eax = g_phys_as | (virt_as << 8);
+               entry->ebx = entry->edx = 0;
+               break;
+       }
+       case 0x80000019:
+               entry->ecx = entry->edx = 0;
+               break;
+       case 0x8000001a:
+               break;
+       case 0x8000001d:
+               break;
+       /*Add support for Centaur's CPUID instruction*/
+       case 0xC0000000:
+               /*Just support up to 0xC0000004 now*/
+               entry->eax = min(entry->eax, 0xC0000004);
+               break;
+       case 0xC0000001:
+               entry->edx &= kvm_supported_word5_x86_features;
+               cpuid_mask(&entry->edx, 5);
+               break;
+       case 3: /* Processor serial number */
+       case 5: /* MONITOR/MWAIT */
+       case 6: /* Thermal management */
+       case 0x80000007: /* Advanced power management */
+       case 0xC0000002:
+       case 0xC0000003:
+       case 0xC0000004:
+       default:
+               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+               break;
+       }
+
+       kvm_x86_ops->set_supported_cpuid(function, entry);
+
+       r = 0;
+
+out:
+       put_cpu();
+
+       return r;
+}
+
+#undef F
+
+struct kvm_cpuid_param {
+       u32 func;
+       u32 idx;
+       bool has_leaf_count;
+       bool (*qualifier)(struct kvm_cpuid_param *param);
+};
+
+static bool is_centaur_cpu(struct kvm_cpuid_param *param)
+{
+       return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
+}
+
+int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                     struct kvm_cpuid_entry2 __user *entries)
+{
+       struct kvm_cpuid_entry2 *cpuid_entries;
+       int limit, nent = 0, r = -E2BIG, i;
+       u32 func;
+       static struct kvm_cpuid_param param[] = {
+               { .func = 0, .has_leaf_count = true },
+               { .func = 0x80000000, .has_leaf_count = true },
+               { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
+               { .func = KVM_CPUID_SIGNATURE },
+               { .func = KVM_CPUID_FEATURES },
+       };
+
+       if (cpuid->nent < 1)
+               goto out;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+       r = -ENOMEM;
+       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+       if (!cpuid_entries)
+               goto out;
+
+       r = 0;
+       for (i = 0; i < ARRAY_SIZE(param); i++) {
+               struct kvm_cpuid_param *ent = &param[i];
+
+               if (ent->qualifier && !ent->qualifier(ent))
+                       continue;
+
+               r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
+                               &nent, cpuid->nent);
+
+               if (r)
+                       goto out_free;
+
+               if (!ent->has_leaf_count)
+                       continue;
+
+               limit = cpuid_entries[nent - 1].eax;
+               for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
+                       r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
+                                    &nent, cpuid->nent);
+
+               if (r)
+                       goto out_free;
+       }
+
+       r = -EFAULT;
+       if (copy_to_user(entries, cpuid_entries,
+                        nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out_free;
+       cpuid->nent = nent;
+       r = 0;
+
+out_free:
+       vfree(cpuid_entries);
+out:
+       return r;
+}
+
+static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+{
+       struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+       int j, nent = vcpu->arch.cpuid_nent;
+
+       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+       /* when no next entry is found, the current entry[i] is reselected */
+       for (j = i + 1; ; j = (j + 1) % nent) {
+               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+               if (ej->function == e->function) {
+                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+                       return j;
+               }
+       }
+       return 0; /* silence gcc, even though control never reaches here */
+}
+
+/* find an entry with matching function, matching index (if needed), and that
+ * should be read next (if it's stateful) */
+static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
+       u32 function, u32 index)
+{
+       if (e->function != function)
+               return 0;
+       if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
+               return 0;
+       if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
+           !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
+               return 0;
+       return 1;
+}
+
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+                                             u32 function, u32 index)
+{
+       int i;
+       struct kvm_cpuid_entry2 *best = NULL;
+
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               struct kvm_cpuid_entry2 *e;
+
+               e = &vcpu->arch.cpuid_entries[i];
+               if (is_matching_cpuid_entry(e, function, index)) {
+                       if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
+                               move_to_next_stateful_cpuid_entry(vcpu, i);
+                       best = e;
+                       break;
+               }
+       }
+       return best;
+}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
+
+int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+       if (!best || best->eax < 0x80000008)
+               goto not_found;
+       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+       if (best)
+               return best->eax & 0xff;
+not_found:
+       return 36;
+}
+
+/*
+ * If no match is found, check whether we exceed the vCPU's limit
+ * and return the content of the highest valid _standard_ leaf instead.
+ * This is to satisfy the CPUID specification.
+ */
+static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
+                                                  u32 function, u32 index)
+{
+       struct kvm_cpuid_entry2 *maxlevel;
+
+       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+       if (!maxlevel || maxlevel->eax >= function)
+               return NULL;
+       if (function & 0x80000000) {
+               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
+               if (!maxlevel)
+                       return NULL;
+       }
+       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+}
+
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+       u32 function, index;
+       struct kvm_cpuid_entry2 *best;
+
+       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
+       best = kvm_find_cpuid_entry(vcpu, function, index);
+
+       if (!best)
+               best = check_cpuid_limit(vcpu, function, index);
+
+       if (best) {
+               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
+       }
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       trace_kvm_cpuid(function,
+                       kvm_register_read(vcpu, VCPU_REGS_RAX),
+                       kvm_register_read(vcpu, VCPU_REGS_RBX),
+                       kvm_register_read(vcpu, VCPU_REGS_RCX),
+                       kvm_register_read(vcpu, VCPU_REGS_RDX));
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
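
kvm_dev_ioctl_get_supported_cpuid() above services the KVM_GET_SUPPORTED_CPUID
ioctl on /dev/kvm. A sketch of the usual caller pattern, growing the buffer on
E2BIG; get_supported_cpuid() is a hypothetical helper, while the ioctl and
structures are the documented uAPI:

    #include <errno.h>
    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static struct kvm_cpuid2 *get_supported_cpuid(int kvm_fd)
    {
            int nent = 64;                  /* initial guess */
            struct kvm_cpuid2 *cpuid;

            for (;;) {
                    cpuid = calloc(1, sizeof(*cpuid) +
                                      nent * sizeof(struct kvm_cpuid_entry2));
                    if (!cpuid)
                            return NULL;
                    cpuid->nent = nent;
                    if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
                            return cpuid;   /* ->nent holds the real count */
                    free(cpuid);
                    if (errno != E2BIG)
                            return NULL;    /* real error */
                    nent *= 2;              /* buffer too small: grow, retry */
            }
    }
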
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
new file mode 100644 (file)
index 0000000..5b97e17
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef ARCH_X86_KVM_CPUID_H
+#define ARCH_X86_KVM_CPUID_H
+
+#include "x86.h"
+
+void kvm_update_cpuid(struct kvm_vcpu *vcpu);
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+                                             u32 function, u32 index);
+int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                     struct kvm_cpuid_entry2 __user *entries);
+int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+                            struct kvm_cpuid *cpuid,
+                            struct kvm_cpuid_entry __user *entries);
+int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries);
+int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries);
+
+
+static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_SMEP));
+}
+
+static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
+}
+
+#endif
index f1e3be18a08ff9507ee98f1020896167efe36932..05a562b850252b3c480ce67dd4e1e4c6c47f6055 100644 (file)
 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 #define No64       (1<<28)
+#define PageTable   (1 << 29)   /* instruction used to write page table */
 /* Source 2 operand type */
-#define Src2Shift   (29)
+#define Src2Shift   (30)
 #define Src2None    (OpNone << Src2Shift)
 #define Src2CL      (OpCL << Src2Shift)
 #define Src2ImmByte (OpImmByte << Src2Shift)
@@ -1674,11 +1675,6 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
-static int em_grp1a(struct x86_emulate_ctxt *ctxt)
-{
-       return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
-}
-
 static int em_grp2(struct x86_emulate_ctxt *ctxt)
 {
        switch (ctxt->modrm_reg) {
@@ -1788,7 +1784,7 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
        return rc;
 }
 
-static int em_grp9(struct x86_emulate_ctxt *ctxt)
+static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 {
        u64 old = ctxt->dst.orig_val64;
 
@@ -1831,6 +1827,24 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
        return rc;
 }
 
+static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
+{
+       /* Save real source value, then compare EAX against destination. */
+       ctxt->src.orig_val = ctxt->src.val;
+       ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+       emulate_2op_SrcV(ctxt, "cmp");
+
+       if (ctxt->eflags & EFLG_ZF) {
+               /* Success: write back to memory. */
+               ctxt->dst.val = ctxt->src.orig_val;
+       } else {
+               /* Failure: write the value we saw to EAX. */
+               ctxt->dst.type = OP_REG;
+               ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
+       }
+       return X86EMUL_CONTINUE;
+}
+
 static int em_lseg(struct x86_emulate_ctxt *ctxt)
 {
        int seg = ctxt->src2.val;
@@ -2481,6 +2495,15 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_call(struct x86_emulate_ctxt *ctxt)
+{
+       long rel = ctxt->src.val;
+
+       ctxt->src.val = (unsigned long)ctxt->_eip;
+       jmp_rel(ctxt, rel);
+       return em_push(ctxt);
+}
+
 static int em_call_far(struct x86_emulate_ctxt *ctxt)
 {
        u16 sel, old_cs;
@@ -2622,12 +2645,75 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
+{
+       u64 pmc;
+
+       if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
+               return emulate_gp(ctxt, 0);
+       ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
+       ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
+       return X86EMUL_CONTINUE;
+}
+
 static int em_mov(struct x86_emulate_ctxt *ctxt)
 {
        ctxt->dst.val = ctxt->src.val;
        return X86EMUL_CONTINUE;
 }
 
+static int em_cr_write(struct x86_emulate_ctxt *ctxt)
+{
+       if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
+               return emulate_gp(ctxt, 0);
+
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       return X86EMUL_CONTINUE;
+}
+
+static int em_dr_write(struct x86_emulate_ctxt *ctxt)
+{
+       unsigned long val;
+
+       if (ctxt->mode == X86EMUL_MODE_PROT64)
+               val = ctxt->src.val & ~0ULL;
+       else
+               val = ctxt->src.val & ~0U;
+
+       /* #UD condition is already handled. */
+       if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
+               return emulate_gp(ctxt, 0);
+
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       return X86EMUL_CONTINUE;
+}
+
+static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
+{
+       u64 msr_data;
+
+       msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
+               | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
+       if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
+               return emulate_gp(ctxt, 0);
+
+       return X86EMUL_CONTINUE;
+}
+
+static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
+{
+       u64 msr_data;
+
+       if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
+               return emulate_gp(ctxt, 0);
+
+       ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
+       ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
+       return X86EMUL_CONTINUE;
+}
+
 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
 {
        if (ctxt->modrm_reg > VCPU_SREG_GS)
@@ -2775,6 +2861,24 @@ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_in(struct x86_emulate_ctxt *ctxt)
+{
+       if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
+                            &ctxt->dst.val))
+               return X86EMUL_IO_NEEDED;
+
+       return X86EMUL_CONTINUE;
+}
+
+static int em_out(struct x86_emulate_ctxt *ctxt)
+{
+       ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
+                                   &ctxt->src.val, 1);
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       return X86EMUL_CONTINUE;
+}
+
 static int em_cli(struct x86_emulate_ctxt *ctxt)
 {
        if (emulator_bad_iopl(ctxt))
@@ -2794,6 +2898,69 @@ static int em_sti(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_bt(struct x86_emulate_ctxt *ctxt)
+{
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       /* only subword offset */
+       ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
+
+       emulate_2op_SrcV_nobyte(ctxt, "bt");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_bts(struct x86_emulate_ctxt *ctxt)
+{
+       emulate_2op_SrcV_nobyte(ctxt, "bts");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_btr(struct x86_emulate_ctxt *ctxt)
+{
+       emulate_2op_SrcV_nobyte(ctxt, "btr");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_btc(struct x86_emulate_ctxt *ctxt)
+{
+       emulate_2op_SrcV_nobyte(ctxt, "btc");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_bsf(struct x86_emulate_ctxt *ctxt)
+{
+       u8 zf;
+
+       __asm__ ("bsf %2, %0; setz %1"
+                : "=r"(ctxt->dst.val), "=q"(zf)
+                : "r"(ctxt->src.val));
+
+       ctxt->eflags &= ~X86_EFLAGS_ZF;
+       if (zf) {
+               ctxt->eflags |= X86_EFLAGS_ZF;
+               /* Disable writeback. */
+               ctxt->dst.type = OP_NONE;
+       }
+       return X86EMUL_CONTINUE;
+}
+
+static int em_bsr(struct x86_emulate_ctxt *ctxt)
+{
+       u8 zf;
+
+       __asm__ ("bsr %2, %0; setz %1"
+                : "=r"(ctxt->dst.val), "=q"(zf)
+                : "r"(ctxt->src.val));
+
+       ctxt->eflags &= ~X86_EFLAGS_ZF;
+       if (zf) {
+               ctxt->eflags |= X86_EFLAGS_ZF;
+               /* Disable writeback. */
+               ctxt->dst.type = OP_NONE;
+       }
+       return X86EMUL_CONTINUE;
+}
+
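
em_bsf() and em_bsr() let the hardware compute the scan result and ZF together; on a zero source, ZF is set and writeback is disabled, so the destination is left alone (the architecturally undefined case). A rough equivalent of the two scans with compiler builtins (GCC/Clang assumed; zero must be handled by the caller, as above):

#include <assert.h>
#include <stdint.h>

/* bsf: index of the lowest set bit; bsr: index of the highest.
 * Undefined for src == 0, matching the ZF special case above. */
static unsigned bsf64(uint64_t src) { return (unsigned)__builtin_ctzll(src); }
static unsigned bsr64(uint64_t src) { return 63u - (unsigned)__builtin_clzll(src); }

int main(void)
{
	assert(bsf64(0x0a00) == 9);    /* bits 9 and 11 set */
	assert(bsr64(0x0a00) == 11);
	return 0;
}
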
 static bool valid_cr(int nr)
 {
        switch (nr) {
@@ -2867,9 +3034,6 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
                break;
                }
        case 4: {
-               u64 cr4;
-
-               cr4 = ctxt->ops->get_cr(ctxt, 4);
                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 
                if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
@@ -3003,6 +3167,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
+#define I2bvIP(_f, _e, _i, _p) \
+       IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
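
The new I2bvIP helper follows the existing D2bvIP/I2bv pattern: one invocation emits both the byte-op entry and the word/dword entry for an opcode pair that needs an execute callback, an intercept and a permission check. A toy model of the expansion (simplified table types, purely illustrative):

#include <stdio.h>

#define ByteOp 0x1u   /* placeholder flag bit */

struct opcode { unsigned flags; const char *execute, *check; };

#define IIP(f, e, i, p)     { (f), #e, #p }
#define I2bvIP(f, e, i, p)  IIP((f) | ByteOp, e, i, p), IIP(f, e, i, p)

static struct opcode demo[] = {
	I2bvIP(0x100, em_in, in, check_perm_in),   /* expands to two entries */
};

int main(void)
{
	printf("%zu entries: flags %#x / %#x, execute %s, check %s\n",
	       sizeof(demo) / sizeof(demo[0]),
	       demo[0].flags, demo[1].flags, demo[0].execute, demo[0].check);
	return 0;
}
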
 
 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),                \
                I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),     \
@@ -3033,17 +3199,17 @@ static struct opcode group7_rm7[] = {
 
 static struct opcode group1[] = {
        I(Lock, em_add),
-       I(Lock, em_or),
+       I(Lock | PageTable, em_or),
        I(Lock, em_adc),
        I(Lock, em_sbb),
-       I(Lock, em_and),
+       I(Lock | PageTable, em_and),
        I(Lock, em_sub),
        I(Lock, em_xor),
        I(0, em_cmp),
 };
 
 static struct opcode group1A[] = {
-       D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
+       I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
 };
 
 static struct opcode group3[] = {
@@ -3058,16 +3224,19 @@ static struct opcode group3[] = {
 };
 
 static struct opcode group4[] = {
-       D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
+       I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
        N, N, N, N, N, N,
 };
 
 static struct opcode group5[] = {
-       D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
-       D(SrcMem | ModRM | Stack),
+       I(DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(SrcMem | ModRM | Stack, em_grp45),
        I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
-       D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
-       D(SrcMem | ModRM | Stack), N,
+       I(SrcMem | ModRM | Stack, em_grp45),
+       I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
+       I(SrcMem | ModRM | Stack, em_grp45), N,
 };
 
 static struct opcode group6[] = {
@@ -3096,18 +3265,21 @@ static struct group_dual group7 = { {
 
 static struct opcode group8[] = {
        N, N, N, N,
-       D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
-       D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
+       I(DstMem | SrcImmByte | ModRM, em_bt),
+       I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
+       I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
+       I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
 };
 
 static struct group_dual group9 = { {
-       N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
+       N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
 }, {
        N, N, N, N, N, N, N, N,
 } };
 
 static struct opcode group11[] = {
-       I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
+       I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+       X7(D(Undefined)),
 };
 
 static struct gprefix pfx_0f_6f_0f_7f = {
@@ -3120,7 +3292,7 @@ static struct opcode opcode_table[256] = {
        I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
        /* 0x08 - 0x0F */
-       I6ALU(Lock, em_or),
+       I6ALU(Lock | PageTable, em_or),
        I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
        N,
        /* 0x10 - 0x17 */
@@ -3132,7 +3304,7 @@ static struct opcode opcode_table[256] = {
        I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
        /* 0x20 - 0x27 */
-       I6ALU(Lock, em_and), N, N,
+       I6ALU(Lock | PageTable, em_and), N, N,
        /* 0x28 - 0x2F */
        I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
        /* 0x30 - 0x37 */
@@ -3155,8 +3327,8 @@ static struct opcode opcode_table[256] = {
        I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
        I(SrcImmByte | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-       D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-       D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
+       I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
+       I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
        /* 0x70 - 0x7F */
        X16(D(SrcImmByte)),
        /* 0x80 - 0x87 */
@@ -3165,11 +3337,11 @@ static struct opcode opcode_table[256] = {
        G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
        G(DstMem | SrcImmByte | ModRM | Group, group1),
        I2bv(DstMem | SrcReg | ModRM, em_test),
-       I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
+       I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
        /* 0x88 - 0x8F */
-       I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
+       I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
        I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
-       I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
+       I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
        D(ModRM | SrcMem | NoAccess | DstReg),
        I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
        G(0, group1A),
@@ -3182,7 +3354,7 @@ static struct opcode opcode_table[256] = {
        II(ImplicitOps | Stack, em_popf, popf), N, N,
        /* 0xA0 - 0xA7 */
        I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
-       I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
+       I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
        I2bv(SrcSI | DstDI | Mov | String, em_mov),
        I2bv(SrcSI | DstDI | String, em_cmp),
        /* 0xA8 - 0xAF */
@@ -3213,13 +3385,13 @@ static struct opcode opcode_table[256] = {
        /* 0xE0 - 0xE7 */
        X3(I(SrcImmByte, em_loop)),
        I(SrcImmByte, em_jcxz),
-       D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
-       D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
+       I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
+       I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
        /* 0xE8 - 0xEF */
-       D(SrcImm | Stack), D(SrcImm | ImplicitOps),
+       I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
        I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
-       D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
-       D2bvIP(SrcAcc | DstDX, out, check_perm_out),
+       I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
+       I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
        /* 0xF0 - 0xF7 */
        N, DI(ImplicitOps, icebp), N, N,
        DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3242,15 +3414,15 @@ static struct opcode twobyte_table[256] = {
        /* 0x20 - 0x2F */
        DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
        DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
-       DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
-       DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
+       IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
+       IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
        N, N, N, N,
        N, N, N, N, N, N, N, N,
        /* 0x30 - 0x3F */
-       DI(ImplicitOps | Priv, wrmsr),
+       II(ImplicitOps | Priv, em_wrmsr, wrmsr),
        IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
-       DI(ImplicitOps | Priv, rdmsr),
-       DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
+       II(ImplicitOps | Priv, em_rdmsr, rdmsr),
+       IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
        I(ImplicitOps | VendorSpecific, em_sysenter),
        I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
        N, N,
@@ -3275,26 +3447,28 @@ static struct opcode twobyte_table[256] = {
        X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
        /* 0xA0 - 0xA7 */
        I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
-       DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
+       DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
        D(DstMem | SrcReg | Src2ImmByte | ModRM),
        D(DstMem | SrcReg | Src2CL | ModRM), N, N,
        /* 0xA8 - 0xAF */
        I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
-       DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
+       DI(ImplicitOps, rsm),
+       I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
        D(DstMem | SrcReg | Src2ImmByte | ModRM),
        D(DstMem | SrcReg | Src2CL | ModRM),
        D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
        /* 0xB0 - 0xB7 */
-       D2bv(DstMem | SrcReg | ModRM | Lock),
+       I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
        I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
-       D(DstMem | SrcReg | ModRM | BitOp | Lock),
+       I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
        I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
        D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xB8 - 0xBF */
        N, N,
-       G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
-       D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
+       G(BitOp, group8),
+       I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
+       I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
        D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xC0 - 0xCF */
        D2bv(DstMem | SrcReg | ModRM | Lock),
@@ -3320,6 +3494,7 @@ static struct opcode twobyte_table[256] = {
 #undef D2bv
 #undef D2bvIP
 #undef I2bv
+#undef I2bvIP
 #undef I6ALU
 
 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
@@ -3697,6 +3872,11 @@ done:
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
+{
+       return ctxt->d & PageTable;
+}
+
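
x86_page_table_writing_insn() exports the new PageTable decode flag: the table entries tagged with it above (mov and xchg to memory, locked or/and, bts/btc, cmpxchg, cmpxchg8b) are the instructions a guest plausibly uses to update its own page tables, so a caller can unprotect the shadowed page and re-execute instead of emulating. A minimal model of the predicate (the flag's bit value here is invented):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PageTable (1ULL << 40)   /* placeholder; the real value is in emulate.c */

struct decoded { uint64_t d; };

/* Mirrors x86_page_table_writing_insn(): a pure decode-flag test. */
static bool page_table_writing_insn(const struct decoded *ctxt)
{
	return ctxt->d & PageTable;
}

int main(void)
{
	struct decoded mov_to_mem = { .d = PageTable }, plain_load = { .d = 0 };
	assert(page_table_writing_insn(&mov_to_mem));
	assert(!page_table_writing_insn(&plain_load));
	return 0;
}
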
 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 {
        /* The second termination condition only applies for REPE
@@ -3720,7 +3900,6 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
        struct x86_emulate_ops *ops = ctxt->ops;
-       u64 msr_data;
        int rc = X86EMUL_CONTINUE;
        int saved_dst_type = ctxt->dst.type;
 
@@ -3854,15 +4033,6 @@ special_insn:
                        goto cannot_emulate;
                ctxt->dst.val = (s32) ctxt->src.val;
                break;
-       case 0x6c:              /* insb */
-       case 0x6d:              /* insw/insd */
-               ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
-               goto do_io_in;
-       case 0x6e:              /* outsb */
-       case 0x6f:              /* outsw/outsd */
-               ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
-               goto do_io_out;
-               break;
        case 0x70 ... 0x7f: /* jcc (short) */
                if (test_cc(ctxt->b, ctxt->eflags))
                        jmp_rel(ctxt, ctxt->src.val);
@@ -3870,9 +4040,6 @@ special_insn:
        case 0x8d: /* lea r16/r32, m */
                ctxt->dst.val = ctxt->src.addr.mem.ea;
                break;
-       case 0x8f:              /* pop (sole member of Grp1a) */
-               rc = em_grp1a(ctxt);
-               break;
        case 0x90 ... 0x97: /* nop / xchg reg, rax */
                if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
                        break;
@@ -3905,38 +4072,11 @@ special_insn:
                ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
                rc = em_grp2(ctxt);
                break;
-       case 0xe4:      /* inb */
-       case 0xe5:      /* in */
-               goto do_io_in;
-       case 0xe6: /* outb */
-       case 0xe7: /* out */
-               goto do_io_out;
-       case 0xe8: /* call (near) */ {
-               long int rel = ctxt->src.val;
-               ctxt->src.val = (unsigned long) ctxt->_eip;
-               jmp_rel(ctxt, rel);
-               rc = em_push(ctxt);
-               break;
-       }
        case 0xe9: /* jmp rel */
        case 0xeb: /* jmp rel short */
                jmp_rel(ctxt, ctxt->src.val);
                ctxt->dst.type = OP_NONE; /* Disable writeback. */
                break;
-       case 0xec: /* in al,dx */
-       case 0xed: /* in (e/r)ax,dx */
-       do_io_in:
-               if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
-                                    &ctxt->dst.val))
-                       goto done; /* IO is needed */
-               break;
-       case 0xee: /* out dx,al */
-       case 0xef: /* out dx,(e/r)ax */
-       do_io_out:
-               ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
-                                     &ctxt->src.val, 1);
-               ctxt->dst.type = OP_NONE;       /* Disable writeback. */
-               break;
        case 0xf4:              /* hlt */
                ctxt->ops->halt(ctxt);
                break;
@@ -3956,12 +4096,6 @@ special_insn:
        case 0xfd: /* std */
                ctxt->eflags |= EFLG_DF;
                break;
-       case 0xfe: /* Grp4 */
-               rc = em_grp45(ctxt);
-               break;
-       case 0xff: /* Grp5 */
-               rc = em_grp45(ctxt);
-               break;
        default:
                goto cannot_emulate;
        }
@@ -4036,49 +4170,6 @@ twobyte_insn:
        case 0x21: /* mov from dr to reg */
                ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
                break;
-       case 0x22: /* mov reg, cr */
-               if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               }
-               ctxt->dst.type = OP_NONE;
-               break;
-       case 0x23: /* mov from reg to dr */
-               if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
-                               ((ctxt->mode == X86EMUL_MODE_PROT64) ?
-                                ~0ULL : ~0U)) < 0) {
-                       /* #UD condition is already handled by the code above */
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               }
-
-               ctxt->dst.type = OP_NONE;       /* no writeback */
-               break;
-       case 0x30:
-               /* wrmsr */
-               msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
-                       | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
-               if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               }
-               rc = X86EMUL_CONTINUE;
-               break;
-       case 0x32:
-               /* rdmsr */
-               if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               } else {
-                       ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
-                       ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
-               }
-               rc = X86EMUL_CONTINUE;
-               break;
        case 0x40 ... 0x4f:     /* cmov */
                ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
                if (!test_cc(ctxt->b, ctxt->eflags))
@@ -4091,93 +4182,21 @@ twobyte_insn:
        case 0x90 ... 0x9f:     /* setcc r/m8 */
                ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
                break;
-       case 0xa3:
-             bt:               /* bt */
-               ctxt->dst.type = OP_NONE;
-               /* only subword offset */
-               ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
-               emulate_2op_SrcV_nobyte(ctxt, "bt");
-               break;
        case 0xa4: /* shld imm8, r, r/m */
        case 0xa5: /* shld cl, r, r/m */
                emulate_2op_cl(ctxt, "shld");
                break;
-       case 0xab:
-             bts:              /* bts */
-               emulate_2op_SrcV_nobyte(ctxt, "bts");
-               break;
        case 0xac: /* shrd imm8, r, r/m */
        case 0xad: /* shrd cl, r, r/m */
                emulate_2op_cl(ctxt, "shrd");
                break;
        case 0xae:              /* clflush */
                break;
-       case 0xb0 ... 0xb1:     /* cmpxchg */
-               /*
-                * Save real source value, then compare EAX against
-                * destination.
-                */
-               ctxt->src.orig_val = ctxt->src.val;
-               ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
-               emulate_2op_SrcV(ctxt, "cmp");
-               if (ctxt->eflags & EFLG_ZF) {
-                       /* Success: write back to memory. */
-                       ctxt->dst.val = ctxt->src.orig_val;
-               } else {
-                       /* Failure: write the value we saw to EAX. */
-                       ctxt->dst.type = OP_REG;
-                       ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
-               }
-               break;
-       case 0xb3:
-             btr:              /* btr */
-               emulate_2op_SrcV_nobyte(ctxt, "btr");
-               break;
        case 0xb6 ... 0xb7:     /* movzx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
                                                       : (u16) ctxt->src.val;
                break;
-       case 0xba:              /* Grp8 */
-               switch (ctxt->modrm_reg & 3) {
-               case 0:
-                       goto bt;
-               case 1:
-                       goto bts;
-               case 2:
-                       goto btr;
-               case 3:
-                       goto btc;
-               }
-               break;
-       case 0xbb:
-             btc:              /* btc */
-               emulate_2op_SrcV_nobyte(ctxt, "btc");
-               break;
-       case 0xbc: {            /* bsf */
-               u8 zf;
-               __asm__ ("bsf %2, %0; setz %1"
-                        : "=r"(ctxt->dst.val), "=q"(zf)
-                        : "r"(ctxt->src.val));
-               ctxt->eflags &= ~X86_EFLAGS_ZF;
-               if (zf) {
-                       ctxt->eflags |= X86_EFLAGS_ZF;
-                       ctxt->dst.type = OP_NONE;       /* Disable writeback. */
-               }
-               break;
-       }
-       case 0xbd: {            /* bsr */
-               u8 zf;
-               __asm__ ("bsr %2, %0; setz %1"
-                        : "=r"(ctxt->dst.val), "=q"(zf)
-                        : "r"(ctxt->src.val));
-               ctxt->eflags &= ~X86_EFLAGS_ZF;
-               if (zf) {
-                       ctxt->eflags |= X86_EFLAGS_ZF;
-                       ctxt->dst.type = OP_NONE;       /* Disable writeback. */
-               }
-               break;
-       }
        case 0xbe ... 0xbf:     /* movsx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
@@ -4194,9 +4213,6 @@ twobyte_insn:
                ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
                                                        (u64) ctxt->src.val;
                break;
-       case 0xc7:              /* Grp9 (cmpxchg8b) */
-               rc = em_grp9(ctxt);
-               break;
        default:
                goto cannot_emulate;
        }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 405f2620392f5e32d393166520e707b2d039b3bb..d68f99df690c72ba81b53f8436c77c2d420d56a5 100644
@@ -344,7 +344,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
        struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;
 
-       if (!irqchip_in_kernel(kvm))
+       if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
                return;
 
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
@@ -397,15 +397,11 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
        case 1:
         /* FIXME: enhance mode 4 precision */
        case 4:
-               if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-                       create_pit_timer(kvm, val, 0);
-               }
+               create_pit_timer(kvm, val, 0);
                break;
        case 2:
        case 3:
-               if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-                       create_pit_timer(kvm, val, 1);
-               }
+               create_pit_timer(kvm, val, 1);
                break;
        default:
                destroy_pit_timer(kvm->arch.vpit);
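
This hoists the HPET-legacy test from both pit_load_count() call sites into create_pit_timer() itself, so every caller gets the guard. The resulting predicate, condensed into a checkable model (flag value taken from the kvm UAPI header; verify against your tree):

#include <assert.h>
#include <stdbool.h>

#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001   /* assumed, from asm/kvm.h */

/* The PIT timer is only (re)armed with an in-kernel irqchip and while
 * HPET legacy routing has not taken over the PIT output. */
static bool pit_timer_allowed(bool irqchip_in_kernel, unsigned flags)
{
	return irqchip_in_kernel && !(flags & KVM_PIT_FLAGS_HPET_LEGACY);
}

int main(void)
{
	assert(!pit_timer_allowed(true, KVM_PIT_FLAGS_HPET_LEGACY));
	assert(pit_timer_allowed(true, 0));
	assert(!pit_timer_allowed(false, 0));
	return 0;
}
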
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cac4746d7ffb643b7fcd54451fb2d994f2f53c9b..b6a73537e1efd2270a00d52612342ed168081b55 100644
@@ -262,9 +262,10 @@ int kvm_pic_read_irq(struct kvm *kvm)
 
 void kvm_pic_reset(struct kvm_kpic_state *s)
 {
-       int irq;
-       struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
+       int irq, i;
+       struct kvm_vcpu *vcpu;
        u8 irr = s->irr, isr = s->imr;
+       bool found = false;
 
        s->last_irr = 0;
        s->irr = 0;
@@ -281,12 +282,19 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
        s->special_fully_nested_mode = 0;
        s->init4 = 0;
 
-       for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
-               if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
-                       if (irr & (1 << irq) || isr & (1 << irq)) {
-                               pic_clear_isr(s, irq);
-                       }
-       }
+       kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+               if (kvm_apic_accept_pic_intr(vcpu)) {
+                       found = true;
+                       break;
+               }
+
+
+       if (!found)
+               return;
+
+       for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+               if (irr & (1 << irq) || isr & (1 << irq))
+                       pic_clear_isr(s, irq);
 }
 
 static void pic_ioport_write(void *opaque, u32 addr, u32 val)
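
With bsp_vcpu gone from struct kvm, kvm_pic_reset() now checks whether any vcpu would accept PIC interrupts before clearing latched IRR/ISR bits, rather than consulting only the boot CPU. The decision reduces to an existence scan, sketched here as a stand-in for the kvm_for_each_vcpu() loop:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* True if at least one vcpu's LAPIC would accept PIC interrupts. */
static bool any_vcpu_accepts_pic(const bool *accepts, size_t nvcpus)
{
	size_t i;

	for (i = 0; i < nvcpus; i++)
		if (accepts[i])
			return true;
	return false;
}

int main(void)
{
	bool vcpus[3] = { false, true, false };
	assert(any_vcpu_accepts_pic(vcpus, 3));
	return 0;
}
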
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 54abb40199d67d45ed9b1dc752ebc8cd83a6d509..cfdc6e0ef0025f4687c1a7b9d3c6dff42f0c8875 100644
@@ -38,6 +38,7 @@
 #include "irq.h"
 #include "trace.h"
 #include "x86.h"
+#include "cpuid.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -1120,7 +1121,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 {
        u32 reg = apic_get_reg(apic, lvt_type);
        int vector, mode, trig_mode;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 138e8cc6fea600ef42a515cf08d6d2c4a6f7a60b..6f4ce2575d095edce570b7d15c0cd9724b6255ec 100644
@@ -34,6 +34,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1b36cf3e3d0aff4ee1d587deb044a9618d51869..2a2a9b40db199dd49c99af48d9e2c25bc4acd440 100644
@@ -59,15 +59,6 @@ enum {
        AUDIT_POST_SYNC
 };
 
-char *audit_point_name[] = {
-       "pre page fault",
-       "post page fault",
-       "pre pte write",
-       "post pte write",
-       "pre sync",
-       "post sync"
-};
-
 #undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
@@ -87,9 +78,6 @@ static int dbg = 0;
 module_param(dbg, bool, 0644);
 #endif
 
-static int oos_shadow = 1;
-module_param(oos_shadow, bool, 0644);
-
 #ifndef MMU_DEBUG
 #define ASSERT(x) do { } while (0)
 #else
@@ -593,6 +581,11 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
        return 0;
 }
 
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+       return cache->nobjs;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                  struct kmem_cache *cache)
 {
@@ -953,21 +946,35 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
        }
 }
 
+static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
+                                   struct kvm_memory_slot *slot)
+{
+       struct kvm_lpage_info *linfo;
+
+       if (likely(level == PT_PAGE_TABLE_LEVEL))
+               return &slot->rmap[gfn - slot->base_gfn];
+
+       linfo = lpage_info_slot(gfn, slot, level);
+       return &linfo->rmap_pde;
+}
+
 /*
  * Take gfn and return the reverse mapping to it.
  */
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
        struct kvm_memory_slot *slot;
-       struct kvm_lpage_info *linfo;
 
        slot = gfn_to_memslot(kvm, gfn);
-       if (likely(level == PT_PAGE_TABLE_LEVEL))
-               return &slot->rmap[gfn - slot->base_gfn];
+       return __gfn_to_rmap(kvm, gfn, level, slot);
+}
 
-       linfo = lpage_info_slot(gfn, slot, level);
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_memory_cache *cache;
 
-       return &linfo->rmap_pde;
+       cache = &vcpu->arch.mmu_pte_list_desc_cache;
+       return mmu_memory_cache_free_objects(cache);
 }
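
rmap_can_add() works because mmu_topup_memory_caches() reserves pte_list_desc objects ahead of time; the pte-write path can then skip the prefetch outright instead of failing an allocation under the mmu lock. The reserve-then-check pattern, as a toy model:

#include <assert.h>
#include <stdbool.h>

/* Toy kvm_mmu_memory_cache: objects are reserved up front; consumers
 * first ask whether any are left, as rmap_can_add() does. */
struct obj_cache { int nobjs; void *objs[40]; };

static bool cache_has_objects(const struct obj_cache *c)
{
	return c->nobjs > 0;   /* mmu_memory_cache_free_objects() */
}

static void *cache_alloc(struct obj_cache *c)
{
	return cache_has_objects(c) ? c->objs[--c->nobjs] : NULL;
}

int main(void)
{
	struct obj_cache c = { .nobjs = 1 };

	c.objs[0] = &c;
	assert(cache_alloc(&c) != NULL);   /* reserve consumed */
	assert(cache_alloc(&c) == NULL);   /* prefetch would be skipped */
	return 0;
}
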
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -1004,17 +1011,16 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }
 
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+                              struct kvm_memory_slot *slot)
 {
        unsigned long *rmapp;
        u64 *spte;
        int i, write_protected = 0;
 
-       rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
-
+       rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
-               BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writable_pte(*spte)) {
@@ -1027,12 +1033,11 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
        /* check for huge page mappings */
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               rmapp = gfn_to_rmap(kvm, gfn, i);
+               rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
                spte = rmap_next(kvm, rmapp, NULL);
                while (spte) {
-                       BUG_ON(!spte);
                        BUG_ON(!(*spte & PT_PRESENT_MASK));
-                       BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+                       BUG_ON(!is_large_pte(*spte));
                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                        if (is_writable_pte(*spte)) {
                                drop_spte(kvm, spte);
@@ -1047,6 +1052,14 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
        return write_protected;
 }
 
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+       struct kvm_memory_slot *slot;
+
+       slot = gfn_to_memslot(kvm, gfn);
+       return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+}
+
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long data)
 {
@@ -1103,15 +1116,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long data))
 {
-       int i, j;
+       int j;
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               struct kvm_memory_slot *memslot = &slots->memslots[i];
+       kvm_for_each_memslot(memslot, slots) {
                unsigned long start = memslot->userspace_addr;
                unsigned long end;
 
@@ -1324,7 +1337,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                                  PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-       bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+       bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
        sp->parent_ptes = 0;
        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
@@ -1511,6 +1524,13 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
        return ret;
 }
 
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
+static void mmu_audit_disable(void) { }
+#endif
+
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
 {
@@ -1640,6 +1660,18 @@ static void init_shadow_page_table(struct kvm_mmu_page *sp)
                sp->spt[i] = 0ull;
 }
 
+static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
+{
+       sp->write_flooding_count = 0;
+}
+
+static void clear_sp_write_flooding_count(u64 *spte)
+{
+       struct kvm_mmu_page *sp =  page_header(__pa(spte));
+
+       __clear_sp_write_flooding_count(sp);
+}
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
@@ -1683,6 +1715,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                } else if (sp->unsync)
                        kvm_mmu_mark_parents_unsync(sp);
 
+               __clear_sp_write_flooding_count(sp);
                trace_kvm_mmu_get_page(sp, false);
                return sp;
        }
@@ -1796,7 +1829,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        }
 }
 
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
                             u64 *spte)
 {
        u64 pte;
@@ -1804,17 +1837,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 
        pte = *spte;
        if (is_shadow_present_pte(pte)) {
-               if (is_last_spte(pte, sp->role.level))
+               if (is_last_spte(pte, sp->role.level)) {
                        drop_spte(kvm, spte);
-               else {
+                       if (is_large_pte(pte))
+                               --kvm->stat.lpages;
+               } else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        drop_parent_pte(child, spte);
                }
-       } else if (is_mmio_spte(pte))
+               return true;
+       }
+
+       if (is_mmio_spte(pte))
                mmu_spte_clear_no_track(spte);
 
-       if (is_large_pte(pte))
-               --kvm->stat.lpages;
+       return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
@@ -1831,15 +1868,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
        mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
-       int i;
-       struct kvm_vcpu *vcpu;
-
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               vcpu->arch.last_pte_updated = NULL;
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        u64 *parent_pte;
@@ -1899,7 +1927,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
        }
 
        sp->role.invalid = 1;
-       kvm_mmu_reset_last_pte_updated(kvm);
        return ret;
 }
 
@@ -1985,7 +2012,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
        kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
@@ -1994,7 +2021,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
        pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
        r = 0;
-
+       spin_lock(&kvm->mmu_lock);
        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
                pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
                         sp->role.word);
@@ -2002,22 +2029,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
        }
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
-       return r;
-}
-
-static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
-{
-       struct kvm_mmu_page *sp;
-       struct hlist_node *node;
-       LIST_HEAD(invalid_list);
+       spin_unlock(&kvm->mmu_lock);
 
-       for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
-               pgprintk("%s: zap %llx %x\n",
-                        __func__, gfn, sp->role.word);
-               kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-       }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
@@ -2169,8 +2185,6 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                        return 1;
 
                if (!need_unsync && !s->unsync) {
-                       if (!oos_shadow)
-                               return 1;
                        need_unsync = true;
                }
        }
@@ -2191,11 +2205,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        if (set_mmio_spte(sptep, gfn, pfn, pte_access))
                return 0;
 
-       /*
-        * We don't set the accessed bit, since we sometimes want to see
-        * whether the guest actually used the pte (in order to detect
-        * demand paging).
-        */
        spte = PT_PRESENT_MASK;
        if (!speculative)
                spte |= shadow_accessed_mask;
@@ -2346,10 +2355,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                }
        }
        kvm_release_pfn_clean(pfn);
-       if (speculative) {
-               vcpu->arch.last_pte_updated = sptep;
-               vcpu->arch.last_pte_gfn = gfn;
-       }
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2840,12 +2845,12 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
                return;
 
        vcpu_clear_mmio_info(vcpu, ~0ul);
-       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+       kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
                sp = page_header(root);
                mmu_sync_children(vcpu, sp);
-               trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+               kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
                return;
        }
        for (i = 0; i < 4; ++i) {
@@ -2857,7 +2862,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
                        mmu_sync_children(vcpu, sp);
                }
        }
-       trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+       kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 }
 
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -3510,28 +3515,119 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
                kvm_mmu_flush_tlb(vcpu);
 }
 
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
+                                   const u8 *new, int *bytes)
 {
-       u64 *spte = vcpu->arch.last_pte_updated;
+       u64 gentry;
+       int r;
+
+       /*
+        * Assume that the pte write is on a page table of the same type
+        * as the current vcpu's paging mode, since we update the sptes
+        * only when they have the same mode.
+        */
+       if (is_pae(vcpu) && *bytes == 4) {
+               /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+               *gpa &= ~(gpa_t)7;
+               *bytes = 8;
+               r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8));
+               if (r)
+                       gentry = 0;
+               new = (const u8 *)&gentry;
+       }
 
-       return !!(spte && (*spte & shadow_accessed_mask));
+       switch (*bytes) {
+       case 4:
+               gentry = *(const u32 *)new;
+               break;
+       case 8:
+               gentry = *(const u64 *)new;
+               break;
+       default:
+               gentry = 0;
+               break;
+       }
+
+       return gentry;
 }
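
The PAE branch above covers a 32-bit guest updating a 64-bit gpte in two 4-byte stores: the access is rounded down to the containing gpte and widened so the whole entry is re-read. The address arithmetic in isolation:

#include <assert.h>
#include <stdint.h>

/* Widen a 4-byte write to the aligned 8 bytes containing it, as the
 * PAE case of mmu_pte_write_fetch_gpte() does. */
static void widen_pae_write(uint64_t *gpa, int *bytes)
{
	if (*bytes == 4) {
		*gpa &= ~(uint64_t)7;
		*bytes = 8;
	}
}

int main(void)
{
	uint64_t gpa = 0x1234c;   /* second half of the gpte at 0x12348 */
	int bytes = 4;

	widen_pae_write(&gpa, &bytes);
	assert(gpa == 0x12348 && bytes == 8);
	return 0;
}
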
 
-static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+/*
+ * If we're seeing too many writes to a page, it may no longer be a page table,
+ * or we may be forking, in which case it is better to unmap the page.
+ */
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
 {
-       u64 *spte = vcpu->arch.last_pte_updated;
+       /*
+        * Skip write-flooding detection for an sp whose level is 1: it can
+        * become unsync, and then the guest page is no longer write-protected.
+        */
+       if (sp->role.level == 1)
+               return false;
 
-       if (spte
-           && vcpu->arch.last_pte_gfn == gfn
-           && shadow_accessed_mask
-           && !(*spte & shadow_accessed_mask)
-           && is_shadow_present_pte(*spte))
-               set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+       return ++sp->write_flooding_count >= 3;
+}
+
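
The per-sp write_flooding_count replaces the old per-vcpu last_pt_write fork detector: three emulated writes to a shadow page without an intervening use of that page suggest it is no longer a page table, and clear_sp_write_flooding_count() (added to paging_tmpl.h below) resets the counter on every walk through the sp. The counter's behavior, modeled:

#include <assert.h>
#include <stdbool.h>

struct shadow_page { int level; int write_flooding_count; };

/* Level-1 pages can go unsync instead, so flooding never triggers. */
static bool detect_flooding(struct shadow_page *sp)
{
	if (sp->level == 1)
		return false;
	return ++sp->write_flooding_count >= 3;
}

/* Reset whenever a shadow walk actually uses the page. */
static void page_used(struct shadow_page *sp) { sp->write_flooding_count = 0; }

int main(void)
{
	struct shadow_page sp = { .level = 2 };

	assert(!detect_flooding(&sp) && !detect_flooding(&sp));
	page_used(&sp);                  /* guest touched it as a PT again */
	assert(!detect_flooding(&sp));   /* counter restarted */
	assert(!detect_flooding(&sp));
	assert(detect_flooding(&sp));    /* third consecutive write: zap */
	return 0;
}
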
+/*
+ * Misaligned accesses are too much trouble to fix up; also, they usually
+ * indicate a page is not used as a page table.
+ */
+static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
+                                   int bytes)
+{
+       unsigned offset, pte_size, misaligned;
+
+       pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+                gpa, bytes, sp->role.word);
+
+       offset = offset_in_page(gpa);
+       pte_size = sp->role.cr4_pae ? 8 : 4;
+
+       /*
+        * Sometimes the OS only writes the last byte to update status
+        * bits; for example, Linux's clear_bit() uses an andb instruction.
+        */
+       if (!(offset & (pte_size - 1)) && bytes == 1)
+               return false;
+
+       misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+       misaligned |= bytes < 4;
+
+       return misaligned;
+}
+
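
The misalignment test XORs the offsets of the first and last byte of the write and masks away the within-pte bits; any surviving bit means the write straddled a pte boundary. Worked checks for 8-byte ptes (the cr4_pae case):

#include <assert.h>

/* A write of `bytes` at page offset `off` is treated as misaligned if
 * it crosses a pte_size boundary or is shorter than 4 bytes, except
 * for an aligned 1-byte status-bit update. */
static int misaligned(unsigned off, unsigned bytes, unsigned pte_size)
{
	if (!(off & (pte_size - 1)) && bytes == 1)
		return 0;
	return (((off ^ (off + bytes - 1)) & ~(pte_size - 1)) != 0) || bytes < 4;
}

int main(void)
{
	assert(!misaligned(0x10, 8, 8)); /* aligned 64-bit gpte write */
	assert(misaligned(0x14, 8, 8));  /* straddles two 8-byte ptes */
	assert(!misaligned(0x18, 1, 8)); /* andb-style status-bit write */
	return 0;
}
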
+static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
+{
+       unsigned page_offset, quadrant;
+       u64 *spte;
+       int level;
+
+       page_offset = offset_in_page(gpa);
+       level = sp->role.level;
+       *nspte = 1;
+       if (!sp->role.cr4_pae) {
+               page_offset <<= 1;      /* 32->64 */
+               /*
+                * A 32-bit pde maps 4MB while the shadow pdes map
+                * only 2MB.  So we need to double the offset again
+                * and zap two pdes instead of one.
+                */
+               if (level == PT32_ROOT_LEVEL) {
+                       page_offset &= ~7; /* kill rounding error */
+                       page_offset <<= 1;
+                       *nspte = 2;
+               }
+               quadrant = page_offset >> PAGE_SHIFT;
+               page_offset &= ~PAGE_MASK;
+               if (quadrant != sp->role.quadrant)
+                       return NULL;
+       }
+
+       spte = &sp->spt[page_offset / sizeof(*spte)];
+       return spte;
 }
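
For a non-PAE guest, each 4-byte gpte shadows an 8-byte spte, so get_written_sptes() doubles the page offset; the overflow past 4K selects which quadrant shadow page the write lands in, and it must match sp->role.quadrant (PT32_ROOT_LEVEL doubles once more and zaps two pdes). Replaying the arithmetic for one write:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ul << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long off = 0x804;       /* gpte index 513 of a 32-bit PT */
	unsigned quadrant;

	off <<= 1;                       /* 32->64: 0x1008 */
	quadrant = off >> PAGE_SHIFT;    /* 1: the second shadow page */
	off &= ~PAGE_MASK;               /* 0x008 within that page */
	printf("quadrant=%u spte_index=%lu\n", quadrant, off / 8);  /* 1, 1 */
	return 0;
}
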
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes,
-                      bool guest_initiated)
+                      const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        union kvm_mmu_page_role mask = { .word = 0 };
@@ -3539,8 +3635,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
-       unsigned pte_size, page_offset, misaligned, quadrant, offset;
-       int level, npte, invlpg_counter, r, flooded = 0;
+       int npte;
        bool remote_flush, local_flush, zap_page;
 
        /*
@@ -3551,112 +3646,45 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                return;
 
        zap_page = remote_flush = local_flush = false;
-       offset = offset_in_page(gpa);
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
-       invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
+       gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
 
        /*
-        * Assume that the pte write on a page table of the same type
-        * as the current vcpu paging mode since we update the sptes only
-        * when they have the same mode.
+        * No need to care whether the memory allocation succeeded,
+        * since pte prefetch is skipped if the cache does not have
+        * enough objects.
         */
-       if ((is_pae(vcpu) && bytes == 4) || !new) {
-               /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
-               if (is_pae(vcpu)) {
-                       gpa &= ~(gpa_t)7;
-                       bytes = 8;
-               }
-               r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
-               if (r)
-                       gentry = 0;
-               new = (const u8 *)&gentry;
-       }
-
-       switch (bytes) {
-       case 4:
-               gentry = *(const u32 *)new;
-               break;
-       case 8:
-               gentry = *(const u64 *)new;
-               break;
-       default:
-               gentry = 0;
-               break;
-       }
+       mmu_topup_memory_caches(vcpu);
 
        spin_lock(&vcpu->kvm->mmu_lock);
-       if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
-               gentry = 0;
-       kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
-       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
-       if (guest_initiated) {
-               kvm_mmu_access_page(vcpu, gfn);
-               if (gfn == vcpu->arch.last_pt_write_gfn
-                   && !last_updated_pte_accessed(vcpu)) {
-                       ++vcpu->arch.last_pt_write_count;
-                       if (vcpu->arch.last_pt_write_count >= 3)
-                               flooded = 1;
-               } else {
-                       vcpu->arch.last_pt_write_gfn = gfn;
-                       vcpu->arch.last_pt_write_count = 1;
-                       vcpu->arch.last_pte_updated = NULL;
-               }
-       }
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
        mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-               pte_size = sp->role.cr4_pae ? 8 : 4;
-               misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
-               misaligned |= bytes < 4;
-               if (misaligned || flooded) {
-                       /*
-                        * Misaligned accesses are too much trouble to fix
-                        * up; also, they usually indicate a page is not used
-                        * as a page table.
-                        *
-                        * If we're seeing too many writes to a page,
-                        * it may no longer be a page table, or we may be
-                        * forking, in which case it is better to unmap the
-                        * page.
-                        */
-                       pgprintk("misaligned: gpa %llx bytes %d role %x\n",
-                                gpa, bytes, sp->role.word);
+               spte = get_written_sptes(sp, gpa, &npte);
+
+               if (detect_write_misaligned(sp, gpa, bytes) ||
+                     detect_write_flooding(sp, spte)) {
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
-               page_offset = offset;
-               level = sp->role.level;
-               npte = 1;
-               if (!sp->role.cr4_pae) {
-                       page_offset <<= 1;      /* 32->64 */
-                       /*
-                        * A 32-bit pde maps 4MB while the shadow pdes map
-                        * only 2MB.  So we need to double the offset again
-                        * and zap two pdes instead of one.
-                        */
-                       if (level == PT32_ROOT_LEVEL) {
-                               page_offset &= ~7; /* kill rounding error */
-                               page_offset <<= 1;
-                               npte = 2;
-                       }
-                       quadrant = page_offset >> PAGE_SHIFT;
-                       page_offset &= ~PAGE_MASK;
-                       if (quadrant != sp->role.quadrant)
-                               continue;
-               }
+
+               spte = get_written_sptes(sp, gpa, &npte);
+               if (!spte)
+                       continue;
+
                local_flush = true;
-               spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        entry = *spte;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte);
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-                             & mask.word))
+                             & mask.word) && rmap_can_add(vcpu))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
@@ -3665,7 +3693,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-       trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
+       kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
@@ -3679,9 +3707,8 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
-       spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       spin_unlock(&vcpu->kvm->mmu_lock);
+
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3702,10 +3729,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+       if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+               return vcpu_match_mmio_gpa(vcpu, addr);
+
+       return vcpu_match_mmio_gva(vcpu, addr);
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                       void *insn, int insn_len)
 {
-       int r;
+       int r, emulation_type = EMULTYPE_RETRY;
        enum emulation_result er;
 
        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3717,11 +3752,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                goto out;
        }
 
-       r = mmu_topup_memory_caches(vcpu);
-       if (r)
-               goto out;
+       if (is_mmio_page_fault(vcpu, cr2))
+               emulation_type = 0;
 
-       er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+       er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
        switch (er) {
        case EMULATE_DONE:
@@ -3792,7 +3826,11 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+       vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.translate_gpa = translate_gpa;
+       vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
 
        return alloc_mmu_pages(vcpu);
 }
@@ -3852,14 +3890,14 @@ restart:
        spin_unlock(&kvm->mmu_lock);
 }
 
-static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-                                              struct list_head *invalid_list)
+static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
+                                               struct list_head *invalid_list)
 {
        struct kvm_mmu_page *page;
 
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
-       return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
+       kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
 }
 
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
@@ -3874,15 +3912,15 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
        raw_spin_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               int idx, freed_pages;
+               int idx;
                LIST_HEAD(invalid_list);
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
                if (!kvm_freed && nr_to_scan > 0 &&
                    kvm->arch.n_used_mmu_pages > 0) {
-                       freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-                                                         &invalid_list);
+                       kvm_mmu_remove_some_alloc_mmu_pages(kvm,
+                                                           &invalid_list);
                        kvm_freed = kvm;
                }
                nr_to_scan--;
@@ -3944,15 +3982,15 @@ nomem:
  */
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-       int i;
        unsigned int nr_mmu_pages;
        unsigned int  nr_pages = 0;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++)
-               nr_pages += slots->memslots[i].npages;
+       kvm_for_each_memslot(memslot, slots)
+               nr_pages += memslot->npages;
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
@@ -3961,127 +3999,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        return nr_mmu_pages;
 }
 
-static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
-                               unsigned len)
-{
-       if (len > buffer->len)
-               return NULL;
-       return buffer->ptr;
-}
-
-static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
-                               unsigned len)
-{
-       void *ret;
-
-       ret = pv_mmu_peek_buffer(buffer, len);
-       if (!ret)
-               return ret;
-       buffer->ptr += len;
-       buffer->len -= len;
-       buffer->processed += len;
-       return ret;
-}
-
-static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
-                            gpa_t addr, gpa_t value)
-{
-       int bytes = 8;
-       int r;
-
-       if (!is_long_mode(vcpu) && !is_pae(vcpu))
-               bytes = 4;
-
-       r = mmu_topup_memory_caches(vcpu);
-       if (r)
-               return r;
-
-       if (!emulator_write_phys(vcpu, addr, &value, bytes))
-               return -EFAULT;
-
-       return 1;
-}
-
-static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
-{
-       (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
-       return 1;
-}
-
-static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
-{
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-       return 1;
-}
-
-static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
-                            struct kvm_pv_mmu_op_buffer *buffer)
-{
-       struct kvm_mmu_op_header *header;
-
-       header = pv_mmu_peek_buffer(buffer, sizeof *header);
-       if (!header)
-               return 0;
-       switch (header->op) {
-       case KVM_MMU_OP_WRITE_PTE: {
-               struct kvm_mmu_op_write_pte *wpte;
-
-               wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
-               if (!wpte)
-                       return 0;
-               return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
-                                       wpte->pte_val);
-       }
-       case KVM_MMU_OP_FLUSH_TLB: {
-               struct kvm_mmu_op_flush_tlb *ftlb;
-
-               ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
-               if (!ftlb)
-                       return 0;
-               return kvm_pv_mmu_flush_tlb(vcpu);
-       }
-       case KVM_MMU_OP_RELEASE_PT: {
-               struct kvm_mmu_op_release_pt *rpt;
-
-               rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
-               if (!rpt)
-                       return 0;
-               return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
-       }
-       default: return 0;
-       }
-}
-
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-                 gpa_t addr, unsigned long *ret)
-{
-       int r;
-       struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
-
-       buffer->ptr = buffer->buf;
-       buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
-       buffer->processed = 0;
-
-       r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
-       if (r)
-               goto out;
-
-       while (buffer->len) {
-               r = kvm_pv_mmu_op_one(vcpu, buffer);
-               if (r < 0)
-                       goto out;
-               if (r == 0)
-                       break;
-       }
-
-       r = 1;
-out:
-       *ret = buffer->processed;
-       return r;
-}
-
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 {
        struct kvm_shadow_walk_iterator iterator;
@@ -4110,12 +4027,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        mmu_free_memory_caches(vcpu);
 }
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_module_exit(void)
 {
        mmu_destroy_caches();
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 746ec259d02490bf7b7ce552560ff37e170d9825..fe15dcc07a6b9f9497bffc4ea2d6c4d19019c6fd 100644
 
 #include <linux/ratelimit.h>
 
+char const *audit_point_name[] = {
+       "pre page fault",
+       "post page fault",
+       "pre pte write",
+       "post pte write",
+       "pre sync",
+       "post sync"
+};
+
 #define audit_printk(kvm, fmt, args...)                \
        printk(KERN_ERR "audit: (%s) error: "   \
                fmt, audit_point_name[kvm->arch.audit_point], ##args)
@@ -224,7 +233,10 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
        mmu_spte_walk(vcpu, audit_spte);
 }
 
-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
+static bool mmu_audit;
+static struct jump_label_key mmu_audit_key;
+
+static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 
@@ -236,18 +248,18 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
        audit_vcpu_spte(vcpu);
 }
 
-static bool mmu_audit;
+static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
+{
+       if (static_branch((&mmu_audit_key)))
+               __kvm_mmu_audit(vcpu, point);
+}
 
 static void mmu_audit_enable(void)
 {
-       int ret;
-
        if (mmu_audit)
                return;
 
-       ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-       WARN_ON(ret);
-
+       jump_label_inc(&mmu_audit_key);
        mmu_audit = true;
 }
 
@@ -256,8 +268,7 @@ static void mmu_audit_disable(void)
        if (!mmu_audit)
                return;
 
-       unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-       tracepoint_synchronize_unregister();
+       jump_label_dec(&mmu_audit_key);
        mmu_audit = false;
 }
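
The hunk above converts mmu auditing from a registered tracepoint to a jump label, so the hook in kvm_mmu_audit() costs only a patched-out branch while auditing is disabled. A minimal sketch of the 3.3-era jump-label pattern (a standalone illustration with invented names, not code from this patch):

    #include <linux/jump_label.h>
    #include <linux/printk.h>

    static struct jump_label_key demo_key;  /* becomes "struct static_key" in 3.5 */
    static bool demo_on;

    static inline void demo_hook(int arg)
    {
            /* compiles to a no-op branch while the key's count is zero */
            if (static_branch(&demo_key))
                    pr_info("demo hook fired: %d\n", arg);
    }

    static void demo_enable(void)
    {
            if (demo_on)
                    return;
            jump_label_inc(&demo_key);      /* patch the branch in */
            demo_on = true;
    }

    static void demo_disable(void)
    {
            if (!demo_on)
                    return;
            jump_label_dec(&demo_key);      /* patch the branch back out */
            demo_on = false;
    }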
 
index eed67f34146d66557d5f5668a1f7c37191dd50a2..89fb0e81322a96d996d1fd554ec9c1d6c55553f6 100644 (file)
@@ -243,25 +243,6 @@ TRACE_EVENT(
        TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
                  __entry->access)
 );
-
-TRACE_EVENT(
-       kvm_mmu_audit,
-       TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
-       TP_ARGS(vcpu, audit_point),
-
-       TP_STRUCT__entry(
-               __field(struct kvm_vcpu *, vcpu)
-               __field(int, audit_point)
-       ),
-
-       TP_fast_assign(
-               __entry->vcpu = vcpu;
-               __entry->audit_point = audit_point;
-       ),
-
-       TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
-                 audit_point_name[__entry->audit_point])
-);
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
index 92994100638b26749519bc69d9c59b5a4ba10cc7..15610285ebb647075e17a1388e031128eb764329 100644 (file)
@@ -497,6 +497,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
             shadow_walk_next(&it)) {
                gfn_t table_gfn;
 
+               clear_sp_write_flooding_count(it.sptep);
                drop_large_spte(vcpu, it.sptep);
 
                sp = NULL;
@@ -522,6 +523,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
             shadow_walk_next(&it)) {
                gfn_t direct_gfn;
 
+               clear_sp_write_flooding_count(it.sptep);
                validate_direct_spte(vcpu, it.sptep, direct_access);
 
                drop_large_spte(vcpu, it.sptep);
@@ -536,6 +538,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                link_shadow_page(it.sptep, sp);
        }
 
+       clear_sp_write_flooding_count(it.sptep);
        mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
                     user_fault, write_fault, emulate, it.level,
                     gw->gfn, pfn, prefault, map_writable);
@@ -599,11 +602,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
-               if (!prefault) {
+               if (!prefault)
                        inject_page_fault(vcpu, &walker.fault);
-                       /* reset fork detector */
-                       vcpu->arch.last_pt_write_count = 0;
-               }
+
                return 0;
        }
 
@@ -631,7 +632,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
 
-       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
@@ -641,11 +642,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
                 sptep, *sptep, emulate);
 
-       if (!emulate)
-               vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
        ++vcpu->stat.pf_fixed;
-       trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
+       kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return emulate;
@@ -656,65 +654,66 @@ out_unlock:
        return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+       int offset = 0;
+
+       WARN_ON(sp->role.level != 1);
+
+       if (PTTYPE == 32)
+               offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+       return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
-       gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
-       int need_flush = 0;
 
        vcpu_clear_mmio_info(vcpu, gva);
 
-       spin_lock(&vcpu->kvm->mmu_lock);
+       /*
+        * No need to check the return value here; if it fails,
+        * rmap_can_add() lets us skip the pte prefetch later.
+        */
+       mmu_topup_memory_caches(vcpu);
 
+       spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
 
                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
-                       int offset, shift;
+                       pt_element_t gpte;
+                       gpa_t pte_gpa;
 
                        if (!sp->unsync)
                                break;
 
-                       shift = PAGE_SHIFT -
-                                 (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-                       offset = sp->role.quadrant << shift;
-
-                       pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+                       pte_gpa = FNAME(get_level1_sp_gpa)(sp);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-                       if (is_shadow_present_pte(*sptep)) {
-                               if (is_large_pte(*sptep))
-                                       --vcpu->kvm->stat.lpages;
-                               drop_spte(vcpu->kvm, sptep);
-                               need_flush = 1;
-                       } else if (is_mmio_spte(*sptep))
-                               mmu_spte_clear_no_track(sptep);
+                       if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+                               kvm_flush_remote_tlbs(vcpu->kvm);
 
-                       break;
+                       if (!rmap_can_add(vcpu))
+                               break;
+
+                       if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+                                                 sizeof(pt_element_t)))
+                               break;
+
+                       FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                }
 
                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }
-
-       if (need_flush)
-               kvm_flush_remote_tlbs(vcpu->kvm);
-
-       atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
        spin_unlock(&vcpu->kvm->mmu_lock);
-
-       if (pte_gpa == -1)
-               return;
-
-       if (mmu_topup_memory_caches(vcpu))
-               return;
-       kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
 }
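
The reworked invlpg handler tops up the MMU memory caches before taking mmu_lock, because nothing may be allocated under that spinlock; if the topup failed, rmap_can_add() later returns false and the gpte refresh is simply skipped instead of being deferred to kvm_mmu_pte_write() as before. The control flow, schematically (a sketch of the code above, not a new implementation):

    mmu_topup_memory_caches(vcpu);          /* may allocate: do it before locking */
    spin_lock(&vcpu->kvm->mmu_lock);
    /* walk to the last-level spte, zap it, flush remote TLBs if needed */
    if (rmap_can_add(vcpu)) {
            /* caches are topped up: re-read the guest pte and update the spte */
    }
    spin_unlock(&vcpu->kvm->mmu_lock);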
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -769,19 +768,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-       int i, offset, nr_present;
+       int i, nr_present = 0;
        bool host_writable;
        gpa_t first_pte_gpa;
 
-       offset = nr_present = 0;
-
        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);
 
-       if (PTTYPE == 32)
-               offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-       first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+       first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
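
FNAME(get_level1_sp_gpa), now shared by invlpg and sync_page, encapsulates the quadrant arithmetic: with PTTYPE == 32 a guest page table holds 1024 4-byte entries, but a level-1 shadow page maps only 512 of them, so sp->role.quadrant records which half the shadow page mirrors. A worked example with illustrative values:

    /* 32-bit guest; shadow page covering the upper half of a guest table */
    int quadrant = 1;
    int offset   = quadrant << 9;           /* PT64_LEVEL_BITS == 9 -> entry 512 */
    /* byte offset within the guest page: 512 * sizeof(u32) == 2048 */
    gpa_t gpa    = gfn_to_gpa(sp->gfn) + offset * sizeof(u32);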
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
new file mode 100644 (file)
index 0000000..7aad544
--- /dev/null
@@ -0,0 +1,533 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *   Gleb Natapov <gleb@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+
+static struct kvm_arch_event_perf_mapping {
+       u8 eventsel;
+       u8 unit_mask;
+       unsigned event_type;
+       bool inexact;
+} arch_events[] = {
+       /* Index must match CPUID 0x0A.EBX bit vector */
+       [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+       [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+       [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
+       [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
+       [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+       [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+};
+
+/* mapping between fixed pmc index and arch_events array */
+int fixed_pmc_events[] = {1, 0, 2};
+
+static bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+       return pmc->type == KVM_PMC_GP;
+}
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+
+       return pmu->counter_bitmask[pmc->type];
+}
+
+static inline bool pmc_enabled(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+                                        u32 base)
+{
+       if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+               return &pmu->gp_counters[msr - base];
+       return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+       int base = MSR_CORE_PERF_FIXED_CTR0;
+       if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+               return &pmu->fixed_counters[msr - base];
+       return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
+{
+       return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
+}
+
+static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+       if (idx < X86_PMC_IDX_FIXED)
+               return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
+       else
+               return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+}
+
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.apic)
+               kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+static void trigger_pmi(struct irq_work *irq_work)
+{
+       struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
+                       irq_work);
+       struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
+                       arch.pmu);
+
+       kvm_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+                             struct perf_sample_data *data,
+                             struct pt_regs *regs)
+{
+       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+               struct perf_sample_data *data, struct pt_regs *regs)
+{
+       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+               kvm_perf_overflow(perf_event, data, regs);
+               kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+               /*
+                * Inject PMI. If the vcpu was in guest mode during the NMI, the
+                * PMI can be injected on guest-mode re-entry. Otherwise we can't
+                * be sure the vcpu wasn't executing a hlt instruction at the
+                * time of the vmexit and is not going to re-enter guest mode
+                * until woken up. So we should wake it, but this is impossible
+                * from NMI context. Do it from irq work instead.
+                */
+               if (!kvm_is_in_guest())
+                       irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+               else
+                       kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+       }
+}
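
kvm_perf_overflow_intr() may run in NMI context, where a sleeping vcpu cannot be woken, so the wakeup is bounced through irq_work and runs from ordinary IRQ context shortly afterwards. The generic pattern, as a self-contained sketch:

    #include <linux/irq_work.h>

    static void deferred_wakeup(struct irq_work *work)
    {
            /* runs in IRQ context soon after being queued */
    }

    static struct irq_work wakeup_work;

    static void setup(void)
    {
            init_irq_work(&wakeup_work, deferred_wakeup);
    }

    static void from_nmi(void)
    {
            irq_work_queue(&wakeup_work);   /* NMI-safe: lockless list + self-IPI */
    }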
+
+static u64 read_pmc(struct kvm_pmc *pmc)
+{
+       u64 counter, enabled, running;
+
+       counter = pmc->counter;
+
+       if (pmc->perf_event)
+               counter += perf_event_read_value(pmc->perf_event,
+                                                &enabled, &running);
+
+       /* FIXME: Scaling needed? */
+
+       return counter & pmc_bitmask(pmc);
+}
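
read_pmc() adds the live perf_event delta to the saved counter value and masks the sum to the architectural counter width, so overflow wraps exactly as it would on hardware. With a hypothetical 48-bit counter:

    u64 mask    = ((u64)1 << 48) - 1;       /* what pmc_bitmask() would return */
    u64 counter = mask;                     /* counter at its maximum value */
    counter = (counter + 1) & mask;         /* one more event wraps it to 0 */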
+
+static void stop_counter(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               pmc->counter = read_pmc(pmc);
+               perf_event_release_kernel(pmc->perf_event);
+               pmc->perf_event = NULL;
+       }
+}
+
+static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+               unsigned config, bool exclude_user, bool exclude_kernel,
+               bool intr)
+{
+       struct perf_event *event;
+       struct perf_event_attr attr = {
+               .type = type,
+               .size = sizeof(attr),
+               .pinned = true,
+               .exclude_idle = true,
+               .exclude_host = 1,
+               .exclude_user = exclude_user,
+               .exclude_kernel = exclude_kernel,
+               .config = config,
+       };
+
+       attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+       event = perf_event_create_kernel_counter(&attr, -1, current,
+                                                intr ? kvm_perf_overflow_intr :
+                                                kvm_perf_overflow, pmc);
+       if (IS_ERR(event)) {
+               printk_once("kvm: pmu event creation failed %ld\n",
+                               PTR_ERR(event));
+               return;
+       }
+
+       pmc->perf_event = event;
+       clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
+               u8 unit_mask)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(arch_events); i++)
+               if (arch_events[i].eventsel == event_select
+                               && arch_events[i].unit_mask == unit_mask
+                               && (pmu->available_event_types & (1 << i)))
+                       break;
+
+       if (i == ARRAY_SIZE(arch_events))
+               return PERF_COUNT_HW_MAX;
+
+       return arch_events[i].event_type;
+}
+
+static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+       unsigned config, type = PERF_TYPE_RAW;
+       u8 event_select, unit_mask;
+
+       pmc->eventsel = eventsel;
+
+       stop_counter(pmc);
+
+       if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+               return;
+
+       event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+       if (!(event_select & (ARCH_PERFMON_EVENTSEL_EDGE |
+                               ARCH_PERFMON_EVENTSEL_INV |
+                               ARCH_PERFMON_EVENTSEL_CMASK))) {
+               config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+                               unit_mask);
+               if (config != PERF_COUNT_HW_MAX)
+                       type = PERF_TYPE_HARDWARE;
+       }
+
+       if (type == PERF_TYPE_RAW)
+               config = eventsel & X86_RAW_EVENT_MASK;
+
+       reprogram_counter(pmc, type, config,
+                       !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+                       !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+                       eventsel & ARCH_PERFMON_EVENTSEL_INT);
+}
+
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+{
+       unsigned en = en_pmi & 0x3;
+       bool pmi = en_pmi & 0x8;
+
+       stop_counter(pmc);
+
+       if (!en || !pmc_enabled(pmc))
+               return;
+
+       reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+                       arch_events[fixed_pmc_events[idx]].event_type,
+                       !(en & 0x2), /* exclude user */
+                       !(en & 0x1), /* exclude kernel */
+                       pmi);
+}
+
+static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+{
+       return (ctrl >> (idx * 4)) & 0xf;
+}
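
MSR_CORE_PERF_FIXED_CTR_CTRL carries one 4-bit field per fixed counter, which fixed_en_pmi() extracts: bits 0-1 enable counting in ring 0 and ring 3 respectively, and bit 3 requests a PMI on overflow. Decoding an illustrative value:

    u64 ctrl  = 0xb0;                       /* field for fixed counter 1 is 0xb */
    u8  field = (ctrl >> (1 * 4)) & 0xf;    /* == 0xb */
    unsigned en = field & 0x3;              /* 3: count in ring 0 and ring 3 */
    bool pmi    = field & 0x8;              /* true: raise a PMI on overflow */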
+
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+{
+       int i;
+
+       for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+               u8 en_pmi = fixed_en_pmi(data, i);
+               struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
+
+               if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+                       continue;
+
+               reprogram_fixed_counter(pmc, en_pmi, i);
+       }
+
+       pmu->fixed_ctr_ctrl = data;
+}
+
+static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+{
+       struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+
+       if (!pmc)
+               return;
+
+       if (pmc_is_gp(pmc))
+               reprogram_gp_counter(pmc, pmc->eventsel);
+       else {
+               int fidx = idx - X86_PMC_IDX_FIXED;
+               reprogram_fixed_counter(pmc,
+                               fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+       }
+}
+
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+{
+       int bit;
+       u64 diff = pmu->global_ctrl ^ data;
+
+       pmu->global_ctrl = data;
+
+       for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+               reprogram_idx(pmu, bit);
+}
+
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int ret;
+
+       switch (msr) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               ret = pmu->version > 1;
+               break;
+       default:
+               ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
+                       || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
+                       || get_fixed_pmc(pmu, msr);
+               break;
+       }
+       return ret;
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       switch (index) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+               *data = pmu->fixed_ctr_ctrl;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               *data = pmu->global_status;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               *data = pmu->global_ctrl;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               *data = pmu->global_ovf_ctrl;
+               return 0;
+       default:
+               if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                               (pmc = get_fixed_pmc(pmu, index))) {
+                       *data = read_pmc(pmc);
+                       return 0;
+               } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                       *data = pmc->eventsel;
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       switch (index) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+               if (pmu->fixed_ctr_ctrl == data)
+                       return 0;
+               if (!(data & 0xfffffffffffff444)) {
+                       reprogram_fixed_counters(pmu, data);
+                       return 0;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               break; /* RO MSR */
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (pmu->global_ctrl == data)
+                       return 0;
+               if (!(data & pmu->global_ctrl_mask)) {
+                       global_ctrl_changed(pmu, data);
+                       return 0;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+                       pmu->global_status &= ~data;
+                       pmu->global_ovf_ctrl = data;
+                       return 0;
+               }
+               break;
+       default:
+               if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                               (pmc = get_fixed_pmc(pmu, index))) {
+                       data = (s64)(s32)data;
+                       pmc->counter += data - read_pmc(pmc);
+                       return 0;
+               } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                       if (data == pmc->eventsel)
+                               return 0;
+                       if (!(data & 0xffffffff00200000ull)) {
+                               reprogram_gp_counter(pmc, data);
+                               return 0;
+                       }
+               }
+       }
+       return 1;
+}
+
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       bool fast_mode = pmc & (1u << 31);
+       bool fixed = pmc & (1u << 30);
+       struct kvm_pmc *counters;
+       u64 ctr;
+
+       pmc &= (3u << 30) - 1;
+       if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+               return 1;
+       if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+               return 1;
+       counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+       ctr = read_pmc(&counters[pmc]);
+       if (fast_mode)
+               ctr = (u32)ctr;
+       *data = ctr;
+
+       return 0;
+}
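
kvm_pmu_read_pmc() decodes the architectural RDPMC index convention from guest ECX: bit 30 selects the fixed-counter space and bit 31 requests fast mode, which truncates the result to 32 bits. An illustrative read of fixed counter 1:

    unsigned pmc = 0x40000001;              /* value the guest put in ECX */
    bool fast    = pmc & (1u << 31);        /* false: full-width read */
    bool fixed   = pmc & (1u << 30);        /* true: fixed-counter space */
    pmc &= (3u << 30) - 1;                  /* remaining bits: index 1 */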
+
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_cpuid_entry2 *entry;
+       unsigned bitmap_len;
+
+       pmu->nr_arch_gp_counters = 0;
+       pmu->nr_arch_fixed_counters = 0;
+       pmu->counter_bitmask[KVM_PMC_GP] = 0;
+       pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+       pmu->version = 0;
+
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return;
+
+       pmu->version = entry->eax & 0xff;
+       if (!pmu->version)
+               return;
+
+       pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
+                       X86_PMC_MAX_GENERIC);
+       pmu->counter_bitmask[KVM_PMC_GP] =
+               ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
+       bitmap_len = (entry->eax >> 24) & 0xff;
+       pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
+
+       if (pmu->version == 1) {
+               pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
+               return;
+       }
+
+       pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+                       X86_PMC_MAX_FIXED);
+       pmu->counter_bitmask[KVM_PMC_FIXED] =
+               ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
+       pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
+                       | (((1ull << pmu->nr_arch_fixed_counters) - 1)
+                               << X86_PMC_IDX_FIXED));
+}
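
kvm_pmu_cpuid_update() sizes the guest PMU from CPUID leaf 0xA: EAX packs the PMU version (bits 0-7), the number of GP counters (8-15), their width (16-23) and the length of the EBX event bit vector (24-31), and a set EBX bit marks an architectural event as unavailable, hence the negation when computing available_event_types. Decoding a hypothetical leaf value:

    u32 eax = 0x07300402;                   /* hypothetical leaf 0xA EAX */
    int version = eax & 0xff;               /* 2: version 2 architectural PMU */
    int nr_gp   = (eax >> 8) & 0xff;        /* 4 general-purpose counters */
    int gp_bits = (eax >> 16) & 0xff;       /* 48 bits wide */
    int ebx_len = (eax >> 24) & 0xff;       /* 7 meaningful bits in EBX */
    u64 gp_mask = ((u64)1 << gp_bits) - 1;  /* counter_bitmask[KVM_PMC_GP] */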
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+       memset(pmu, 0, sizeof(*pmu));
+       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+               pmu->gp_counters[i].type = KVM_PMC_GP;
+               pmu->gp_counters[i].vcpu = vcpu;
+               pmu->gp_counters[i].idx = i;
+       }
+       for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+               pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+               pmu->fixed_counters[i].vcpu = vcpu;
+               pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+       }
+       init_irq_work(&pmu->irq_work, trigger_pmi);
+       kvm_pmu_cpuid_update(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int i;
+
+       irq_work_sync(&pmu->irq_work);
+       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+               struct kvm_pmc *pmc = &pmu->gp_counters[i];
+               stop_counter(pmc);
+               pmc->counter = pmc->eventsel = 0;
+       }
+
+       for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+               stop_counter(&pmu->fixed_counters[i]);
+
+       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
+               pmu->global_ovf_ctrl = 0;
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_pmu_reset(vcpu);
+}
+
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       u64 bitmask;
+       int bit;
+
+       bitmask = pmu->reprogram_pmi;
+
+       for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+               struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+               if (unlikely(!pmc || !pmc->perf_event)) {
+                       clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+                       continue;
+               }
+
+               reprogram_idx(pmu, bit);
+       }
+}
index e32243eac2f48874ac494f5f8a27ed7fb605ebc4..5fa553babe566876d70115d1d259bd2d1a4925a6 100644 (file)
@@ -1014,6 +1014,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_NMI);
        set_intercept(svm, INTERCEPT_SMI);
        set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+       set_intercept(svm, INTERCEPT_RDPMC);
        set_intercept(svm, INTERCEPT_CPUID);
        set_intercept(svm, INTERCEPT_INVD);
        set_intercept(svm, INTERCEPT_HLT);
@@ -2770,6 +2771,19 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+static int rdpmc_interception(struct vcpu_svm *svm)
+{
+       int err;
+
+       if (!static_cpu_has(X86_FEATURE_NRIPS))
+               return emulate_on_interception(svm);
+
+       err = kvm_rdpmc(&svm->vcpu);
+       kvm_complete_insn_gp(&svm->vcpu, err);
+
+       return 1;
+}
+
 bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
 {
        unsigned long cr0 = svm->vcpu.arch.cr0;
@@ -3190,6 +3204,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
        [SVM_EXIT_VINTR]                        = interrupt_window_interception,
+       [SVM_EXIT_RDPMC]                        = rdpmc_interception,
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_IRET]                         = iret_interception,
        [SVM_EXIT_INVD]                         = emulate_on_interception,
index ae432ea1cd835731f26451dd22f9ff53b7b85eb6..6b85cc647f346fed962e4bb5b870fdefc2cbd8cf 100644 (file)
 #include <linux/atomic.h>
 #include "kvm_timer.h"
 
-static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
+enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
 {
-       int restart_timer = 0;
+       struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+       struct kvm_vcpu *vcpu = ktimer->vcpu;
        wait_queue_head_t *q = &vcpu->wq;
 
        /*
@@ -40,26 +41,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
 
        if (ktimer->t_ops->is_periodic(ktimer)) {
                hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
-               restart_timer = 1;
-       }
-
-       return restart_timer;
-}
-
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
-{
-       int restart_timer;
-       struct kvm_vcpu *vcpu;
-       struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-
-       vcpu = ktimer->vcpu;
-       if (!vcpu)
-               return HRTIMER_NORESTART;
-
-       restart_timer = __kvm_timer_fn(vcpu, ktimer);
-       if (restart_timer)
                return HRTIMER_RESTART;
-       else
+       } else
                return HRTIMER_NORESTART;
 }
-
index 579a0b51696ac560b1768ae445bb8f2084172f8c..906a7e84200f7b96eedbe5647d613a9a5d375546 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "irq.h"
 #include "mmu.h"
+#include "cpuid.h"
 
 #include <linux/kvm_host.h>
 #include <linux/module.h>
@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        int save_nmsrs, index;
        unsigned long *msr_bitmap;
 
-       vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 #endif
                CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
                CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+               CPU_BASED_RDPMC_EXITING |
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        /*
         * We can allow some features even when not supported by the
@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                        return 1;
                /* Otherwise falls through */
        default:
-               vmx_load_host_state(to_vmx(vcpu));
                if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
                        return 0;
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
-                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
        switch (msr_index) {
        case MSR_EFER:
-               vmx_load_host_state(vmx);
                ret = kvm_set_msr_common(vcpu, msr_index, data);
                break;
 #ifdef CONFIG_X86_64
@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                        break;
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
-                       vmx_load_host_state(vmx);
                        msr->data = data;
                        break;
                }
@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_TSC_OFFSETING |
              CPU_BASED_MWAIT_EXITING |
              CPU_BASED_MONITOR_EXITING |
-             CPU_BASED_INVLPG_EXITING;
+             CPU_BASED_INVLPG_EXITING |
+             CPU_BASED_RDPMC_EXITING;
 
        if (yield_on_hlt)
                min |= CPU_BASED_HLT_EXITING;
@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
 {
        if (!kvm->arch.tss_addr) {
                struct kvm_memslots *slots;
+               struct kvm_memory_slot *slot;
                gfn_t base_gfn;
 
                slots = kvm_memslots(kvm);
-               base_gfn = slots->memslots[0].base_gfn +
-                                kvm->memslots->memslots[0].npages - 3;
+               slot = id_to_memslot(slots, 0);
+               base_gfn = slot->base_gfn + slot->npages - 3;
+
                return base_gfn << PAGE_SHIFT;
        }
        return kvm->arch.tss_addr;
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-               /* We can get here when nested_run_pending caused
-                * vmx_interrupt_allowed() to return false. In this case, do
-                * nothing - the interrupt will be injected later.
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+               /*
+                * We get here if vmx_interrupt_allowed() said we can't
+                * inject to L1 now because L2 must run. Ask L2 to exit
+                * right after entry, so we can inject to L1 more promptly.
                 */
+               kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                return;
+       }
 
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
-               struct vmcs12 *vmcs12;
-               if (to_vmx(vcpu)->nested.nested_run_pending)
+               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+               if (to_vmx(vcpu)->nested.nested_run_pending ||
+                   (vmcs12->idt_vectoring_info_field &
+                    VECTORING_INFO_VALID_MASK))
                        return 0;
                nested_vmx_vmexit(vcpu);
-               vmcs12 = get_vmcs12(vcpu);
                vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
                vmcs12->vm_exit_intr_info = 0;
                /* fall through to normal code, but now in L1, not L2 */
@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+       int err;
+
+       err = kvm_rdpmc(vcpu);
+       kvm_complete_insn_gp(vcpu, err);
+
+       return 1;
+}
+
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_INVD]                    = handle_invd,
        [EXIT_REASON_INVLPG]                  = handle_invlpg,
+       [EXIT_REASON_RDPMC]                   = handle_rdpmc,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
        [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
index 4c938da2ba00b40dc430d5a7854b0ee62b6ce631..1171def5f96b76a1119709e706b977be872f8082 100644 (file)
@@ -26,6 +26,7 @@
 #include "tss.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "cpuid.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -82,8 +83,6 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
-static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-                                   struct kvm_cpuid_entry2 __user *entries);
 static void process_nmi(struct kvm_vcpu *vcpu);
 
 struct kvm_x86_ops *kvm_x86_ops;
@@ -574,54 +573,6 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
-
-static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static void update_cpuid(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-       struct kvm_lapic *apic = vcpu->arch.apic;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       if (!best)
-               return;
-
-       /* Update OSXSAVE bit */
-       if (cpu_has_xsave && best->function == 0x1) {
-               best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
-                       best->ecx |= bit(X86_FEATURE_OSXSAVE);
-       }
-
-       if (apic) {
-               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
-                       apic->lapic_timer.timer_mode_mask = 3 << 17;
-               else
-                       apic->lapic_timer.timer_mode_mask = 1 << 17;
-       }
-}
-
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -655,7 +606,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                kvm_mmu_reset_context(vcpu);
 
        if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
-               update_cpuid(vcpu);
+               kvm_update_cpuid(vcpu);
 
        return 0;
 }
@@ -809,6 +760,21 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 }
 EXPORT_SYMBOL_GPL(kvm_get_dr);
 
+bool kvm_rdpmc(struct kvm_vcpu *vcpu)
+{
+       u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       u64 data;
+       int err;
+
+       err = kvm_pmu_read_pmc(vcpu, ecx, &data);
+       if (err)
+               return err;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+       return err;
+}
+EXPORT_SYMBOL_GPL(kvm_rdpmc);
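
kvm_rdpmc() splits the 64-bit counter value across the register pair exactly as the hardware instruction does: the low half goes to EAX and the high half to EDX. For example:

    u64 data = 0x0000123487654321ULL;
    u32 eax  = (u32)data;                   /* 0x87654321 -> RAX */
    u32 edx  = data >> 32;                  /* 0x00001234 -> RDX */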
+
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1358,12 +1324,11 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
        if (page_num >= blob_size)
                goto out;
        r = -ENOMEM;
-       page = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!page)
+       page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
+       if (IS_ERR(page)) {
+               r = PTR_ERR(page);
                goto out;
-       r = -EFAULT;
-       if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
-               goto out_free;
+       }
        if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
                goto out_free;
        r = 0;
@@ -1652,8 +1617,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         * which we perfectly emulate ;-). Any other value should be at least
         * reported, some guests depend on them.
         */
-       case MSR_P6_EVNTSEL0:
-       case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
@@ -1665,8 +1628,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        /* at least RHEL 4 unconditionally writes to the perfctr registers,
         * so we ignore writes to make it happy.
         */
-       case MSR_P6_PERFCTR0:
-       case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
@@ -1703,6 +1664,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_set_msr(vcpu, msr, data);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                msr, data);
@@ -1865,10 +1828,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_K8_SYSCFG:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
-       case MSR_P6_PERFCTR0:
-       case MSR_P6_PERFCTR1:
-       case MSR_P6_EVNTSEL0:
-       case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
        case MSR_K8_INT_PENDING_MSG:
@@ -1979,6 +1938,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data = 0xbe702111;
                break;
        default:
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_get_msr(vcpu, msr, pdata);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                        return 1;
@@ -2037,15 +1998,12 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;
 
-       r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-       entries = kmalloc(size, GFP_KERNEL);
-       if (!entries)
+       entries = memdup_user(user_msrs->entries, size);
+       if (IS_ERR(entries)) {
+               r = PTR_ERR(entries);
                goto out;
-
-       r = -EFAULT;
-       if (copy_from_user(entries, user_msrs->entries, size))
-               goto out_free;
+       }
 
        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
@@ -2265,466 +2223,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       unsigned long long efer = 0;
-
-       rdmsrl_safe(MSR_EFER, &efer);
-       return efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
-               entry->edx &= ~(1 << 20);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
-/* when an old userspace process fills a new kernel module */
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
-                                   struct kvm_cpuid *cpuid,
-                                   struct kvm_cpuid_entry __user *entries)
-{
-       int r, i;
-       struct kvm_cpuid_entry *cpuid_entries;
-
-       r = -E2BIG;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               goto out;
-       r = -ENOMEM;
-       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-       if (!cpuid_entries)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-               goto out_free;
-       for (i = 0; i < cpuid->nent; i++) {
-               vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
-               vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
-               vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
-               vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
-               vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
-               vcpu->arch.cpuid_entries[i].index = 0;
-               vcpu->arch.cpuid_entries[i].flags = 0;
-               vcpu->arch.cpuid_entries[i].padding[0] = 0;
-               vcpu->arch.cpuid_entries[i].padding[1] = 0;
-               vcpu->arch.cpuid_entries[i].padding[2] = 0;
-       }
-       vcpu->arch.cpuid_nent = cpuid->nent;
-       cpuid_fix_nx_cap(vcpu);
-       r = 0;
-       kvm_apic_set_version(vcpu);
-       kvm_x86_ops->cpuid_update(vcpu);
-       update_cpuid(vcpu);
-
-out_free:
-       vfree(cpuid_entries);
-out:
-       return r;
-}
-
-static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
-                                    struct kvm_cpuid2 *cpuid,
-                                    struct kvm_cpuid_entry2 __user *entries)
-{
-       int r;
-
-       r = -E2BIG;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out;
-       vcpu->arch.cpuid_nent = cpuid->nent;
-       kvm_apic_set_version(vcpu);
-       kvm_x86_ops->cpuid_update(vcpu);
-       update_cpuid(vcpu);
-       return 0;
-
-out:
-       return r;
-}
-
-static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
-                                    struct kvm_cpuid2 *cpuid,
-                                    struct kvm_cpuid_entry2 __user *entries)
-{
-       int r;
-
-       r = -E2BIG;
-       if (cpuid->nent < vcpu->arch.cpuid_nent)
-               goto out;
-       r = -EFAULT;
-       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
-                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out;
-       return 0;
-
-out:
-       cpuid->nent = vcpu->arch.cpuid_nent;
-       return r;
-}
-
-static void cpuid_mask(u32 *word, int wordnum)
-{
-       *word &= boot_cpu_data.x86_capability[wordnum];
-}
-
-static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
-                          u32 index)
-{
-       entry->function = function;
-       entry->index = index;
-       cpuid_count(entry->function, entry->index,
-                   &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
-       entry->flags = 0;
-}
-
-static bool supported_xcr0_bit(unsigned bit)
-{
-       u64 mask = ((u64)1 << bit);
-
-       return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
-}
-
-#define F(x) bit(X86_FEATURE_##x)
-
-static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
-                        u32 index, int *nent, int maxnent)
-{
-       unsigned f_nx = is_efer_nx() ? F(NX) : 0;
-#ifdef CONFIG_X86_64
-       unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
-                               ? F(GBPAGES) : 0;
-       unsigned f_lm = F(LM);
-#else
-       unsigned f_gbpages = 0;
-       unsigned f_lm = 0;
-#endif
-       unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
-
-       /* cpuid 1.edx */
-       const u32 kvm_supported_word0_x86_features =
-               F(FPU) | F(VME) | F(DE) | F(PSE) |
-               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
-               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
-               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
-               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
-               0 /* Reserved, DS, ACPI */ | F(MMX) |
-               F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
-               0 /* HTT, TM, Reserved, PBE */;
-       /* cpuid 0x80000001.edx */
-       const u32 kvm_supported_word1_x86_features =
-               F(FPU) | F(VME) | F(DE) | F(PSE) |
-               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
-               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
-               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
-               F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
-               F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
-               0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
-       /* cpuid 1.ecx */
-       const u32 kvm_supported_word4_x86_features =
-               F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
-               0 /* DS-CPL, VMX, SMX, EST */ |
-               0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
-               0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
-               0 /* Reserved, DCA */ | F(XMM4_1) |
-               F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
-               F(F16C) | F(RDRAND);
-       /* cpuid 0x80000001.ecx */
-       const u32 kvm_supported_word6_x86_features =
-               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
-               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
-               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
-
-       /* cpuid 0xC0000001.edx */
-       const u32 kvm_supported_word5_x86_features =
-               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
-               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
-               F(PMM) | F(PMM_EN);
-
-       /* cpuid 7.0.ebx */
-       const u32 kvm_supported_word9_x86_features =
-               F(SMEP) | F(FSGSBASE) | F(ERMS);
-
-       /* all calls to cpuid_count() should be made on the same cpu */
-       get_cpu();
-       do_cpuid_1_ent(entry, function, index);
-       ++*nent;
-
-       switch (function) {
-       case 0:
-               entry->eax = min(entry->eax, (u32)0xd);
-               break;
-       case 1:
-               entry->edx &= kvm_supported_word0_x86_features;
-               cpuid_mask(&entry->edx, 0);
-               entry->ecx &= kvm_supported_word4_x86_features;
-               cpuid_mask(&entry->ecx, 4);
-               /* we support x2apic emulation even if host does not support
-                * it since we emulate x2apic in software */
-               entry->ecx |= F(X2APIC);
-               break;
-       /* function 2 entries are STATEFUL. That is, repeated cpuid commands
-        * may return different values. This forces us to get_cpu() before
-        * issuing the first command, and also to emulate this annoying behavior
-        * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
-       case 2: {
-               int t, times = entry->eax & 0xff;
-
-               entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
-               entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-               for (t = 1; t < times && *nent < maxnent; ++t) {
-                       do_cpuid_1_ent(&entry[t], function, 0);
-                       entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
-                       ++*nent;
-               }
-               break;
-       }
-       /* function 4 has additional index. */
-       case 4: {
-               int i, cache_type;
-
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               /* read more entries until cache_type is zero */
-               for (i = 1; *nent < maxnent; ++i) {
-                       cache_type = entry[i - 1].eax & 0x1f;
-                       if (!cache_type)
-                               break;
-                       do_cpuid_1_ent(&entry[i], function, i);
-                       entry[i].flags |=
-                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                       ++*nent;
-               }
-               break;
-       }
-       case 7: {
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               /* Mask ebx against host capability word 9 */
-               if (index == 0) {
-                       entry->ebx &= kvm_supported_word9_x86_features;
-                       cpuid_mask(&entry->ebx, 9);
-               } else
-                       entry->ebx = 0;
-               entry->eax = 0;
-               entry->ecx = 0;
-               entry->edx = 0;
-               break;
-       }
-       case 9:
-               break;
-       /* function 0xb has additional index. */
-       case 0xb: {
-               int i, level_type;
-
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               /* read more entries until level_type is zero */
-               for (i = 1; *nent < maxnent; ++i) {
-                       level_type = entry[i - 1].ecx & 0xff00;
-                       if (!level_type)
-                               break;
-                       do_cpuid_1_ent(&entry[i], function, i);
-                       entry[i].flags |=
-                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                       ++*nent;
-               }
-               break;
-       }
-       case 0xd: {
-               int idx, i;
-
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
-                       do_cpuid_1_ent(&entry[i], function, idx);
-                       if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
-                               continue;
-                       entry[i].flags |=
-                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                       ++*nent;
-                       ++i;
-               }
-               break;
-       }
-       case KVM_CPUID_SIGNATURE: {
-               char signature[12] = "KVMKVMKVM\0\0";
-               u32 *sigptr = (u32 *)signature;
-               entry->eax = 0;
-               entry->ebx = sigptr[0];
-               entry->ecx = sigptr[1];
-               entry->edx = sigptr[2];
-               break;
-       }
-       case KVM_CPUID_FEATURES:
-               entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
-                            (1 << KVM_FEATURE_NOP_IO_DELAY) |
-                            (1 << KVM_FEATURE_CLOCKSOURCE2) |
-                            (1 << KVM_FEATURE_ASYNC_PF) |
-                            (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
-
-               if (sched_info_on())
-                       entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
-
-               entry->ebx = 0;
-               entry->ecx = 0;
-               entry->edx = 0;
-               break;
-       case 0x80000000:
-               entry->eax = min(entry->eax, 0x8000001a);
-               break;
-       case 0x80000001:
-               entry->edx &= kvm_supported_word1_x86_features;
-               cpuid_mask(&entry->edx, 1);
-               entry->ecx &= kvm_supported_word6_x86_features;
-               cpuid_mask(&entry->ecx, 6);
-               break;
-       case 0x80000008: {
-               unsigned g_phys_as = (entry->eax >> 16) & 0xff;
-               unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
-               unsigned phys_as = entry->eax & 0xff;
-
-               if (!g_phys_as)
-                       g_phys_as = phys_as;
-               entry->eax = g_phys_as | (virt_as << 8);
-               entry->ebx = entry->edx = 0;
-               break;
-       }
-       case 0x80000019:
-               entry->ecx = entry->edx = 0;
-               break;
-       case 0x8000001a:
-               break;
-       case 0x8000001d:
-               break;
-       /*Add support for Centaur's CPUID instruction*/
-       case 0xC0000000:
-               /*Just support up to 0xC0000004 now*/
-               entry->eax = min(entry->eax, 0xC0000004);
-               break;
-       case 0xC0000001:
-               entry->edx &= kvm_supported_word5_x86_features;
-               cpuid_mask(&entry->edx, 5);
-               break;
-       case 3: /* Processor serial number */
-       case 5: /* MONITOR/MWAIT */
-       case 6: /* Thermal management */
-       case 0xA: /* Architectural Performance Monitoring */
-       case 0x80000007: /* Advanced power management */
-       case 0xC0000002:
-       case 0xC0000003:
-       case 0xC0000004:
-       default:
-               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
-               break;
-       }
-
-       kvm_x86_ops->set_supported_cpuid(function, entry);
-
-       put_cpu();
-}
-
-#undef F
-
-static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-                                    struct kvm_cpuid_entry2 __user *entries)
-{
-       struct kvm_cpuid_entry2 *cpuid_entries;
-       int limit, nent = 0, r = -E2BIG;
-       u32 func;
-
-       if (cpuid->nent < 1)
-               goto out;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               cpuid->nent = KVM_MAX_CPUID_ENTRIES;
-       r = -ENOMEM;
-       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
-       if (!cpuid_entries)
-               goto out;
-
-       do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
-       limit = cpuid_entries[0].eax;
-       for (func = 1; func <= limit && nent < cpuid->nent; ++func)
-               do_cpuid_ent(&cpuid_entries[nent], func, 0,
-                            &nent, cpuid->nent);
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
-       limit = cpuid_entries[nent - 1].eax;
-       for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
-               do_cpuid_ent(&cpuid_entries[nent], func, 0,
-                            &nent, cpuid->nent);
-
-
-
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       /* Add support for Centaur's CPUID instruction. */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
-               do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
-                               &nent, cpuid->nent);
-
-               r = -E2BIG;
-               if (nent >= cpuid->nent)
-                       goto out_free;
-
-               limit = cpuid_entries[nent - 1].eax;
-               for (func = 0xC0000001;
-                       func <= limit && nent < cpuid->nent; ++func)
-                       do_cpuid_ent(&cpuid_entries[nent], func, 0,
-                                       &nent, cpuid->nent);
-
-               r = -E2BIG;
-               if (nent >= cpuid->nent)
-                       goto out_free;
-       }
-
-       do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
-                    cpuid->nent);
-
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
-                    cpuid->nent);
-
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       r = -EFAULT;
-       if (copy_to_user(entries, cpuid_entries,
-                        nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out_free;
-       cpuid->nent = nent;
-       r = 0;
-
-out_free:
-       vfree(cpuid_entries);
-out:
-       return r;
-}
-
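(The CPUID table construction removed here, and the CPUID emulation helpers removed further down in this file, are not dropped by the merge; they appear to move into the new arch/x86/kvm/cpuid.c and cpuid.h as part of splitting CPUID support out of x86.c.)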
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
@@ -3042,13 +2540,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
                if (!vcpu->arch.apic)
                        goto out;
-               u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!u.lapic)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
+               u.lapic = memdup_user(argp, sizeof(*u.lapic));
+               if (IS_ERR(u.lapic)) {
+                       r = PTR_ERR(u.lapic);
                        goto out;
+               }
+
                r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
                if (r)
                        goto out;
@@ -3227,14 +2724,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
        case KVM_SET_XSAVE: {
-               u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!u.xsave)
-                       break;
-
-               r = -EFAULT;
-               if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
-                       break;
+               u.xsave = memdup_user(argp, sizeof(*u.xsave));
+               if (IS_ERR(u.xsave)) {
+                       r = PTR_ERR(u.xsave);
+                       goto out;
+               }
 
                r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
                break;
@@ -3255,15 +2749,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
        case KVM_SET_XCRS: {
-               u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!u.xcrs)
-                       break;
-
-               r = -EFAULT;
-               if (copy_from_user(u.xcrs, argp,
-                                  sizeof(struct kvm_xcrs)))
-                       break;
+               u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
+               if (IS_ERR(u.xcrs)) {
+                       r = PTR_ERR(u.xcrs);
+                       goto out;
+               }
 
                r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
                break;
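The three conversions above all follow the same shape: an open-coded kmalloc()/kzalloc() plus copy_from_user() pair becomes a single memdup_user() call, which allocates and copies in one step and reports failure through ERR_PTR (the same merge also swaps kzalloc() + memcpy() for kmemdup() where the source is kernel memory). A minimal sketch of the pattern, using a hypothetical payload struct rather than a real KVM type:

	struct foo *p;	/* "struct foo" is a made-up ioctl payload */

	p = memdup_user(argp, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);	/* -ENOMEM or -EFAULT, chosen by memdup_user() */
	/* ... use p ... */
	kfree(p);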
@@ -3460,16 +2950,59 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
        return 0;
 }
 
+/**
+ * write_protect_slot - write protect a slot for dirty logging
+ * @kvm: the kvm instance
+ * @memslot: the slot we protect
+ * @dirty_bitmap: the bitmap indicating which pages are dirty
+ * @nr_dirty_pages: the number of dirty pages
+ *
+ * We have two ways to find all sptes to protect:
+ * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
+ *    checks ones that have a spte mapping a page in the slot.
+ * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ *
+ * Generally speaking, if the number of dirty pages is small compared to the
+ * number of shadow pages, the latter is cheaper.
+ *
+ * Note that letting others write into a page marked dirty in the old bitmap
+ * through a stale TLB entry is not a problem.  That page will become
+ * write protected again when we flush the TLB, and will then be reported
+ * dirty to user space via the old bitmap.
+ */
+static void write_protect_slot(struct kvm *kvm,
+                              struct kvm_memory_slot *memslot,
+                              unsigned long *dirty_bitmap,
+                              unsigned long nr_dirty_pages)
+{
+       /* Not many dirty pages compared to # of shadow pages. */
+       if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
+               unsigned long gfn_offset;
+
+               for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
+                       unsigned long gfn = memslot->base_gfn + gfn_offset;
+
+                       spin_lock(&kvm->mmu_lock);
+                       kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
+                       spin_unlock(&kvm->mmu_lock);
+               }
+               kvm_flush_remote_tlbs(kvm);
+       } else {
+               spin_lock(&kvm->mmu_lock);
+               kvm_mmu_slot_remove_write_access(kvm, memslot->id);
+               spin_unlock(&kvm->mmu_lock);
+       }
+}
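The heuristic above leans on for_each_set_bit(), which visits only the set bits, so the rmap path costs roughly O(nr_dirty_pages) while the full slot walk scales with the number of shadow pages. A stand-alone sketch of the iterator (assumes <linux/bitops.h>; not KVM code):

	unsigned long map = 0x16;	/* bits 1, 2 and 4 set */
	unsigned long bit;

	for_each_set_bit(bit, &map, BITS_PER_LONG)
		pr_info("dirty offset %lu\n", bit);	/* prints 1, 2, 4 */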
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
-       int r, i;
+       int r;
        struct kvm_memory_slot *memslot;
-       unsigned long n;
-       unsigned long is_dirty = 0;
+       unsigned long n, nr_dirty_pages;
 
        mutex_lock(&kvm->slots_lock);
 
@@ -3477,43 +3010,41 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;
 
-       memslot = &kvm->memslots->memslots[log->slot];
+       memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;
 
        n = kvm_dirty_bitmap_bytes(memslot);
-
-       for (i = 0; !is_dirty && i < n/sizeof(long); i++)
-               is_dirty = memslot->dirty_bitmap[i];
+       nr_dirty_pages = memslot->nr_dirty_pages;
 
        /* If nothing is dirty, don't bother messing with page tables. */
-       if (is_dirty) {
+       if (nr_dirty_pages) {
                struct kvm_memslots *slots, *old_slots;
-               unsigned long *dirty_bitmap;
+               unsigned long *dirty_bitmap, *dirty_bitmap_head;
 
-               dirty_bitmap = memslot->dirty_bitmap_head;
-               if (memslot->dirty_bitmap == dirty_bitmap)
-                       dirty_bitmap += n / sizeof(long);
-               memset(dirty_bitmap, 0, n);
+               dirty_bitmap = memslot->dirty_bitmap;
+               dirty_bitmap_head = memslot->dirty_bitmap_head;
+               if (dirty_bitmap == dirty_bitmap_head)
+                       dirty_bitmap_head += n / sizeof(long);
+               memset(dirty_bitmap_head, 0, n);
 
                r = -ENOMEM;
-               slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+               slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
                if (!slots)
                        goto out;
-               memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-               slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
-               slots->generation++;
+
+               memslot = id_to_memslot(slots, log->slot);
+               memslot->nr_dirty_pages = 0;
+               memslot->dirty_bitmap = dirty_bitmap_head;
+               update_memslots(slots, NULL);
 
                old_slots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
                synchronize_srcu_expedited(&kvm->srcu);
-               dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
                kfree(old_slots);
 
-               spin_lock(&kvm->mmu_lock);
-               kvm_mmu_slot_remove_write_access(kvm, log->slot);
-               spin_unlock(&kvm->mmu_lock);
+               write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
 
                r = -EFAULT;
                if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
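The rewritten path above double-buffers the dirty bitmap: dirty_bitmap_head points at a 2*n-byte area, vcpus keep logging into the freshly zeroed half, and the retired half is what gets write-protected and copied out to userspace. A sketch of the half-flip, under those assumptions:

	/* pick the half that is NOT currently active, and clear it */
	unsigned long *head = memslot->dirty_bitmap_head;

	if (memslot->dirty_bitmap == head)
		head += n / sizeof(long);	/* second half */
	memset(head, 0, n);
	/* the new slots copy then points dirty_bitmap at "head" */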
@@ -3658,14 +3189,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-               struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
+               struct kvm_irqchip *chip;
 
-               r = -ENOMEM;
-               if (!chip)
+               chip = memdup_user(argp, sizeof(*chip));
+               if (IS_ERR(chip)) {
+                       r = PTR_ERR(chip);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(chip, argp, sizeof *chip))
-                       goto get_irqchip_out;
+               }
+
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto get_irqchip_out;
@@ -3684,14 +3215,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
        case KVM_SET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-               struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
+               struct kvm_irqchip *chip;
 
-               r = -ENOMEM;
-               if (!chip)
+               chip = memdup_user(argp, sizeof(*chip));
+               if (IS_ERR(chip)) {
+                       r = PTR_ERR(chip);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(chip, argp, sizeof *chip))
-                       goto set_irqchip_out;
+               }
+
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto set_irqchip_out;
@@ -3898,12 +3429,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
        kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
-static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
-{
-       return gpa;
-}
-
-static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
        gpa_t t_gpa;
        struct x86_exception exception;
@@ -4087,7 +3613,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
        ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
        if (ret < 0)
                return 0;
-       kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
+       kvm_mmu_pte_write(vcpu, gpa, val, bytes);
        return 1;
 }
 
@@ -4324,7 +3850,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
-       kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+       kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
 
@@ -4349,32 +3875,24 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
        return r;
 }
 
-
-static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
-                                   int size, unsigned short port, void *val,
-                                   unsigned int count)
+static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+                              unsigned short port, void *val,
+                              unsigned int count, bool in)
 {
-       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-       if (vcpu->arch.pio.count)
-               goto data_avail;
-
-       trace_kvm_pio(0, port, size, count);
+       trace_kvm_pio(!in, port, size, count);
 
        vcpu->arch.pio.port = port;
-       vcpu->arch.pio.in = 1;
+       vcpu->arch.pio.in = in;
        vcpu->arch.pio.count  = count;
        vcpu->arch.pio.size = size;
 
        if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-       data_avail:
-               memcpy(val, vcpu->arch.pio_data, size * count);
                vcpu->arch.pio.count = 0;
                return 1;
        }
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
-       vcpu->run->io.direction = KVM_EXIT_IO_IN;
+       vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
        vcpu->run->io.count = count;
@@ -4383,36 +3901,37 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
        return 0;
 }
 
-static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
-                                    int size, unsigned short port,
-                                    const void *val, unsigned int count)
+static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+                                   int size, unsigned short port, void *val,
+                                   unsigned int count)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       int ret;
 
-       trace_kvm_pio(1, port, size, count);
-
-       vcpu->arch.pio.port = port;
-       vcpu->arch.pio.in = 0;
-       vcpu->arch.pio.count = count;
-       vcpu->arch.pio.size = size;
-
-       memcpy(vcpu->arch.pio_data, val, size * count);
+       if (vcpu->arch.pio.count)
+               goto data_avail;
 
-       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+       ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
+       if (ret) {
+data_avail:
+               memcpy(val, vcpu->arch.pio_data, size * count);
                vcpu->arch.pio.count = 0;
                return 1;
        }
 
-       vcpu->run->exit_reason = KVM_EXIT_IO;
-       vcpu->run->io.direction = KVM_EXIT_IO_OUT;
-       vcpu->run->io.size = size;
-       vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-       vcpu->run->io.count = count;
-       vcpu->run->io.port = port;
-
        return 0;
 }
 
+static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
+                                    int size, unsigned short port,
+                                    const void *val, unsigned int count)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       memcpy(vcpu->arch.pio_data, val, size * count);
+       return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+}
+
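With the split above, emulator_pio_in_out() fills in the same kvm_run I/O fields for both directions, and only the data copy differs between in and out. A hedged sketch of how the userspace side consumes those fields (handle_out() and fill_in() are hypothetical helpers):

	if (run->exit_reason == KVM_EXIT_IO) {
		void *data = (u8 *)run + run->io.data_offset;
		unsigned bytes = run->io.size * run->io.count;

		if (run->io.direction == KVM_EXIT_IO_OUT)
			handle_out(run->io.port, data, bytes);	/* hypothetical */
		else
			fill_in(run->io.port, data, bytes);	/* hypothetical */
	}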
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
        return kvm_x86_ops->get_segment_base(vcpu, seg);
@@ -4627,6 +4146,12 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
        return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
 }
 
+static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
+                            u32 pmc, u64 *pdata)
+{
+       return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
+}
+
 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
 {
        emul_to_vcpu(ctxt)->arch.halt_request = 1;
@@ -4679,6 +4204,7 @@ static struct x86_emulate_ops emulate_ops = {
        .set_dr              = emulator_set_dr,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
+       .read_pmc            = emulator_read_pmc,
        .halt                = emulator_halt,
        .wbinvd              = emulator_wbinvd,
        .fix_hypercall       = emulator_fix_hypercall,
@@ -4836,6 +4362,50 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
        return false;
 }
 
+static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
+                             unsigned long cr2,  int emulation_type)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+
+       last_retry_eip = vcpu->arch.last_retry_eip;
+       last_retry_addr = vcpu->arch.last_retry_addr;
+
+       /*
+        * If the emulation was caused by a #PF and the faulting instruction
+        * does not write a page table, the VM exit was caused by shadow
+        * page protection; we can zap the shadow page and retry the
+        * instruction directly.
+        *
+        * Note: if the guest uses a non-page-table-modifying instruction
+        * on a PDE that points to the instruction itself, we will unmap
+        * the instruction and enter an infinite loop.  So we cache the
+        * last retried eip and the last fault address; if we see the same
+        * eip and address again, we break out of the potential infinite
+        * loop.
+        */
+       vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+
+       if (!(emulation_type & EMULTYPE_RETRY))
+               return false;
+
+       if (x86_page_table_writing_insn(ctxt))
+               return false;
+
+       if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+               return false;
+
+       vcpu->arch.last_retry_eip = ctxt->eip;
+       vcpu->arch.last_retry_addr = cr2;
+
+       if (!vcpu->arch.mmu.direct_map)
+               gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+
+       kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+       return true;
+}
+
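The guard above is a one-entry memo: a retry is allowed unless the same (eip, fault address) pair comes straight back, which is the signature of the self-mapping PDE case described in the comment. Reduced to its essentials (sketch, not KVM code):

	if (ip == last_ip && addr == last_addr)
		return false;		/* second fault at the same spot: would spin */
	last_ip = ip;
	last_addr = addr;
	return true;			/* zap the shadow page and retry once */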
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                            unsigned long cr2,
                            int emulation_type,
@@ -4877,6 +4447,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                return EMULATE_DONE;
        }
 
+       if (retry_instruction(ctxt, cr2, emulation_type))
+               return EMULATE_DONE;
+
        /* this is needed for the vmware backdoor interface to work since it
           changes register values during IO operations */
        if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
@@ -5095,17 +4668,17 @@ static void kvm_timer_init(void)
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
-static int kvm_is_in_guest(void)
+int kvm_is_in_guest(void)
 {
-       return percpu_read(current_vcpu) != NULL;
+       return __this_cpu_read(current_vcpu) != NULL;
 }
 
 static int kvm_is_user_mode(void)
 {
        int user_mode = 3;
 
-       if (percpu_read(current_vcpu))
-               user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+       if (__this_cpu_read(current_vcpu))
+               user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
 
        return user_mode != 0;
 }
@@ -5114,8 +4687,8 @@ static unsigned long kvm_get_guest_ip(void)
 {
        unsigned long ip = 0;
 
-       if (percpu_read(current_vcpu))
-               ip = kvm_rip_read(percpu_read(current_vcpu));
+       if (__this_cpu_read(current_vcpu))
+               ip = kvm_rip_read(__this_cpu_read(current_vcpu));
 
        return ip;
 }
@@ -5128,13 +4701,13 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
 {
-       percpu_write(current_vcpu, vcpu);
+       __this_cpu_write(current_vcpu, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
 
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
 {
-       percpu_write(current_vcpu, NULL);
+       __this_cpu_write(current_vcpu, NULL);
 }
 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
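percpu_read()/percpu_write() were x86-only accessors; __this_cpu_read()/__this_cpu_write() are the generic equivalents (the double-underscore variants assume preemption is already disabled, which holds in these NMI-context callbacks). Minimal usage sketch, assuming <linux/percpu.h>:

	static DEFINE_PER_CPU(struct kvm_vcpu *, demo_vcpu);	/* illustrative name */

	__this_cpu_write(demo_vcpu, vcpu);		/* enter guest context */
	busy = __this_cpu_read(demo_vcpu) != NULL;
	__this_cpu_write(demo_vcpu, NULL);		/* leave */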
 
@@ -5233,15 +4806,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
-static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
-                          unsigned long a1)
-{
-       if (is_long_mode(vcpu))
-               return a0;
-       else
-               return a0 | ((gpa_t)a1 << 32);
-}
-
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
        u64 param, ingpa, outgpa, ret;
@@ -5337,9 +4901,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        case KVM_HC_VAPIC_POLL_IRQ:
                ret = 0;
                break;
-       case KVM_HC_MMU_OP:
-               r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
-               break;
        default:
                ret = -KVM_ENOSYS;
                break;
@@ -5369,125 +4930,6 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
        return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
-{
-       struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-       int j, nent = vcpu->arch.cpuid_nent;
-
-       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
-       /* when no next entry is found, the current entry[i] is reselected */
-       for (j = i + 1; ; j = (j + 1) % nent) {
-               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-               if (ej->function == e->function) {
-                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-                       return j;
-               }
-       }
-       return 0; /* silence gcc, even though control never reaches here */
-}
-
-/* find an entry with matching function, matching index (if needed), and that
- * should be read next (if it's stateful) */
-static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
-       u32 function, u32 index)
-{
-       if (e->function != function)
-               return 0;
-       if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
-               return 0;
-       if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
-           !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
-               return 0;
-       return 1;
-}
-
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
-                                             u32 function, u32 index)
-{
-       int i;
-       struct kvm_cpuid_entry2 *best = NULL;
-
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               struct kvm_cpuid_entry2 *e;
-
-               e = &vcpu->arch.cpuid_entries[i];
-               if (is_matching_cpuid_entry(e, function, index)) {
-                       if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
-                               move_to_next_stateful_cpuid_entry(vcpu, i);
-                       best = e;
-                       break;
-               }
-       }
-       return best;
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
-       if (!best || best->eax < 0x80000008)
-               goto not_found;
-       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-       if (best)
-               return best->eax & 0xff;
-not_found:
-       return 36;
-}
-
-/*
- * If no match is found, check whether we exceed the vCPU's limit
- * and return the content of the highest valid _standard_ leaf instead.
- * This is to satisfy the CPUID specification.
- */
-static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
-                                                  u32 function, u32 index)
-{
-       struct kvm_cpuid_entry2 *maxlevel;
-
-       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
-       if (!maxlevel || maxlevel->eax >= function)
-               return NULL;
-       if (function & 0x80000000) {
-               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
-               if (!maxlevel)
-                       return NULL;
-       }
-       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
-}
-
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
-{
-       u32 function, index;
-       struct kvm_cpuid_entry2 *best;
-
-       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
-       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
-       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
-       best = kvm_find_cpuid_entry(vcpu, function, index);
-
-       if (!best)
-               best = check_cpuid_limit(vcpu, function, index);
-
-       if (best) {
-               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
-               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
-               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
-               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
-       }
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       trace_kvm_cpuid(function,
-                       kvm_register_read(vcpu, VCPU_REGS_RAX),
-                       kvm_register_read(vcpu, VCPU_REGS_RBX),
-                       kvm_register_read(vcpu, VCPU_REGS_RCX),
-                       kvm_register_read(vcpu, VCPU_REGS_RDX));
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
-
 /*
  * Check if userspace requested an interrupt window, and that the
  * interrupt window is open.
@@ -5648,6 +5090,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        int r;
        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                vcpu->run->request_interrupt_window;
+       bool req_immediate_exit = 0;
 
        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5687,7 +5130,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        record_steal_time(vcpu);
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
-
+               req_immediate_exit =
+                       kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
+               if (kvm_check_request(KVM_REQ_PMU, vcpu))
+                       kvm_handle_pmu_event(vcpu);
+               if (kvm_check_request(KVM_REQ_PMI, vcpu))
+                       kvm_deliver_pmi(vcpu);
        }
 
        r = kvm_mmu_reload(vcpu);
@@ -5738,6 +5186,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
+       if (req_immediate_exit)
+               smp_send_reschedule(vcpu->cpu);
+
        kvm_guest_enter();
 
        if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -5943,10 +5394,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (r <= 0)
                goto out;
 
-       if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
-               kvm_register_write(vcpu, VCPU_REGS_RAX,
-                                    kvm_run->hypercall.ret);
-
        r = __vcpu_run(vcpu);
 
 out:
@@ -6148,7 +5595,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (sregs->cr4 & X86_CR4_OSXSAVE)
-               update_cpuid(vcpu);
+               kvm_update_cpuid(vcpu);
 
        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
@@ -6425,6 +5872,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
 
+       kvm_pmu_reset(vcpu);
+
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -6473,10 +5922,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm = vcpu->kvm;
 
        vcpu->arch.emulate_ctxt.ops = &emulate_ops;
-       vcpu->arch.walk_mmu = &vcpu->arch.mmu;
-       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
-       vcpu->arch.mmu.translate_gpa = translate_gpa;
-       vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
        if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
@@ -6513,6 +5958,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_mce_banks;
 
        kvm_async_pf_hash_reset(vcpu);
+       kvm_pmu_init(vcpu);
 
        return 0;
 fail_free_mce_banks:
@@ -6531,6 +5977,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        int idx;
 
+       kvm_pmu_destroy(vcpu);
        kfree(vcpu->arch.mce_banks);
        kvm_free_lapic(vcpu);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
index d36fe237c6659ff3445772cce4a505499173b4e8..cb80c293cdd8ea1b26dbbaa050c1c5c43b1fd1ab 100644 (file)
@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
 }
 
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
-                                             u32 function, u32 index);
-
 static inline bool is_protmode(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
+extern u64 host_xcr0;
+
 #endif
index d5262319997806bb6f1fdfcfb50b6cef37428b0e..900c76337e8f387b1dcf183260e43c1625536500 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/mmu_notifier.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
@@ -50,6 +51,9 @@
 #define KVM_REQ_APF_HALT          12
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
+#define KVM_REQ_IMMEDIATE_EXIT    15
+#define KVM_REQ_PMU               16
+#define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID    0
 
@@ -179,6 +183,7 @@ struct kvm_memory_slot {
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
+       unsigned long nr_dirty_pages;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
@@ -224,11 +229,20 @@ struct kvm_irq_routing_table {};
 
 #endif
 
+#ifndef KVM_MEM_SLOTS_NUM
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#endif
+
+/*
+ * Note:
+ * memslots are no longer sorted by id; use id_to_memslot()
+ * to look up a slot by its id.
+ */
 struct kvm_memslots {
-       int nmemslots;
        u64 generation;
-       struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
-                                       KVM_PRIVATE_MEM_SLOTS];
+       struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
+       /* The mapping table from slot id to the index in memslots[]. */
+       int id_to_index[KVM_MEM_SLOTS_NUM];
 };
 
 struct kvm {
@@ -239,7 +253,6 @@ struct kvm {
        struct srcu_struct srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
-       struct kvm_vcpu *bsp_vcpu;
 #endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
@@ -302,6 +315,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
 
+#define kvm_for_each_memslot(memslot, slots)   \
+       for (memslot = &slots->memslots[0];     \
+             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
+               memslot++)
+
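The iterator stops at the first slot with npages == 0, which is valid only because update_memslots() (added below in kvm_main.c) keeps used slots sorted to the front of the array. Usage sketch:

	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, kvm->memslots)
		pr_info("slot %d: base gfn %llx, %lu pages\n", memslot->id,
			(unsigned long long)memslot->base_gfn, memslot->npages);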
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
@@ -314,6 +332,7 @@ void kvm_exit(void);
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
 
 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 {
@@ -322,6 +341,18 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
                        || lockdep_is_held(&kvm->slots_lock));
 }
 
+static inline struct kvm_memory_slot *
+id_to_memslot(struct kvm_memslots *slots, int id)
+{
+       int index = slots->id_to_index[id];
+       struct kvm_memory_slot *slot;
+
+       slot = &slots->memslots[index];
+
+       WARN_ON(slot->id != id);
+       return slot;
+}
+
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
index 47a070b0520e1ee766b5b4b26816d12d9f45cad8..ff476ddaf3103cc02e5688bc00528e7d042af891 100644 (file)
@@ -35,4 +35,3 @@ static inline int kvm_para_has_feature(unsigned int feature)
 }
 #endif /* __KERNEL__ */
 #endif /* __LINUX_KVM_PARA_H */
-
index 30c3c770813275ed7c9f9a47ca47275a00fc5fbf..01d3b70fc98a346c3bf619016f12f1a7ed5d897a 100644 (file)
@@ -71,6 +71,7 @@ void jump_label_inc(struct jump_label_key *key)
        atomic_inc(&key->enabled);
        jump_label_unlock();
 }
+EXPORT_SYMBOL_GPL(jump_label_inc);
 
 static void __jump_label_dec(struct jump_label_key *key,
                unsigned long rate_limit, struct delayed_work *work)
@@ -86,6 +87,7 @@ static void __jump_label_dec(struct jump_label_key *key,
 
        jump_label_unlock();
 }
+EXPORT_SYMBOL_GPL(jump_label_dec);
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
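Exporting jump_label_inc()/jump_label_dec() lets modular code (KVM is commonly built as a module) flip static branches. A hedged usage sketch against the jump-label API of this era, with a hypothetical key:

	static struct jump_label_key demo_key;	/* hypothetical */

	jump_label_inc(&demo_key);		/* enable the branch */
	if (static_branch(&demo_key))
		do_fast_path();			/* hypothetical */
	jump_label_dec(&demo_key);		/* disable again */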
index a6ec206f36ba2f16f43c90680535ff6bcc45d56f..88b2fe3ddf42a3c60bba0a3fbc1d7bd3172a8730 100644 (file)
@@ -28,9 +28,15 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
         * (addr,len) is fully included in
         * (zone->addr, zone->size)
         */
-
-       return (dev->zone.addr <= addr &&
-               addr + len <= dev->zone.addr + dev->zone.size);
+       if (len < 0)
+               return 0;
+       if (addr + len < addr)
+               return 0;
+       if (addr < dev->zone.addr)
+               return 0;
+       if (addr + len > dev->zone.addr + dev->zone.size)
+               return 0;
+       return 1;
 }
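Splitting the old compound comparison matters because addr + len can wrap: with addr near the top of the address space, the single combined test could pass for a bogus range. Sketch of the failure mode the new checks reject:

	u64 addr = ~0ULL - 4;
	u64 len  = 16;

	if (addr + len < addr)		/* wraps around to 11: bogus range, reject */
		return 0;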
 
 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
index 3eed61eb48675a63dd1f31b0095217ab6bc5f646..dcaf272c26c0e232d01b265e652488642e7cc496 100644 (file)
@@ -185,7 +185,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
                irqe.dest_mode = 0; /* Physical mode. */
                /* need to read apic_id from the apic register since
                 * it can be rewritten */
-               irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
+               irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
        }
 #endif
        return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
@@ -332,9 +332,18 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
                     (void*)addr, len, val);
        ASSERT(!(addr & 0xf));  /* check alignment */
 
-       if (len == 4 || len == 8)
+       switch (len) {
+       case 8:
+       case 4:
                data = *(u32 *) val;
-       else {
+               break;
+       case 2:
+               data = *(u16 *) val;
+               break;
+       case 1:
+               data = *(u8  *) val;
+               break;
+       default:
                printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
                return 0;
        }
@@ -343,7 +352,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
-               ioapic->ioregsel = data;
+               ioapic->ioregsel = data & 0xFF; /* 8-bit register */
                break;
 
        case IOAPIC_REG_WINDOW:
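Two independent fixes in this hunk: narrower guest accesses (1- and 2-byte) are now accepted instead of rejected with a warning, and IOREGSEL is masked to its architected 8 bits, so a wide write cannot leave stray high bits in the register select. For example:

	u32 data = 0x12345610;

	ioregsel = data & 0xFF;		/* keeps 0x10: redirection entry 0, low dword */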
index a195c07fa8290b44cb435f5ac06d841507e33503..4e5f7b7f1d2be1828005e5081ab3af86d7f447f2 100644 (file)
@@ -134,14 +134,15 @@ unmap_pages:
 
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-       int i, idx, r = 0;
+       int idx, r = 0;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
+       kvm_for_each_memslot(memslot, slots) {
+               r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
-       int i, idx;
+       int idx;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
-                                   slots->memslots[i].npages);
-       }
+       kvm_for_each_memslot(memslot, slots)
+               kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+
        srcu_read_unlock(&kvm->srcu, idx);
 
        return 0;
index d9cfb782cb81a87fc5ef4f1734df1a308a55539b..7287bf5d1c9edc1fa84681aea4f989c9c750fa9c 100644 (file)
@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
+static void kvm_init_memslots_id(struct kvm *kvm)
+{
+       int i;
+       struct kvm_memslots *slots = kvm->memslots;
+
+       for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+               slots->id_to_index[i] = slots->memslots[i].id = i;
+}
+
 static struct kvm *kvm_create_vm(void)
 {
        int r, i;
@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
                goto out_err_nosrcu;
+       kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_nosrcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
@@ -547,11 +557,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 
 void kvm_free_physmem(struct kvm *kvm)
 {
-       int i;
        struct kvm_memslots *slots = kvm->memslots;
+       struct kvm_memory_slot *memslot;
 
-       for (i = 0; i < slots->nmemslots; ++i)
-               kvm_free_physmem_slot(&slots->memslots[i], NULL);
+       kvm_for_each_memslot(memslot, slots)
+               kvm_free_physmem_slot(memslot, NULL);
 
        kfree(kvm->memslots);
 }
@@ -625,10 +635,69 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
                return -ENOMEM;
 
        memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+       memslot->nr_dirty_pages = 0;
        return 0;
 }
 #endif /* !CONFIG_S390 */
 
+static struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+       struct kvm_memory_slot *memslot;
+
+       kvm_for_each_memslot(memslot, slots)
+               if (gfn >= memslot->base_gfn &&
+                     gfn < memslot->base_gfn + memslot->npages)
+                       return memslot;
+
+       return NULL;
+}
+
+static int cmp_memslot(const void *slot1, const void *slot2)
+{
+       struct kvm_memory_slot *s1, *s2;
+
+       s1 = (struct kvm_memory_slot *)slot1;
+       s2 = (struct kvm_memory_slot *)slot2;
+
+       if (s1->npages < s2->npages)
+               return 1;
+       if (s1->npages > s2->npages)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Sort the memslots by size, so that larger slots
+ * get a better fit.
+ */
+static void sort_memslots(struct kvm_memslots *slots)
+{
+       int i;
+
+       sort(slots->memslots, KVM_MEM_SLOTS_NUM,
+             sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+
+       for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+               slots->id_to_index[slots->memslots[i].id] = i;
+}
+
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+{
+       if (new) {
+               int id = new->id;
+               struct kvm_memory_slot *old = id_to_memslot(slots, id);
+               unsigned long npages = old->npages;
+
+               *old = *new;
+               if (new->npages != npages)
+                       sort_memslots(slots);
+       }
+
+       slots->generation++;
+}
+
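memslots[] is now kept sorted by npages in descending order, so gfn lookups scan the largest (most frequently hit) slots first, and id_to_index[] is rebuilt after every resort so slot-id lookups stay O(1). The invariant, as a sketch:

	int id;

	/* every id must map back to itself through the index table */
	for (id = 0; id < KVM_MEM_SLOTS_NUM; id++)
		WARN_ON(id_to_memslot(slots, id)->id != id);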
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -662,12 +731,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        (void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size)))
                goto out;
-       if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+       if (mem->slot >= KVM_MEM_SLOTS_NUM)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;
 
-       memslot = &kvm->memslots->memslots[mem->slot];
+       memslot = id_to_memslot(kvm->memslots, mem->slot);
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -774,15 +843,17 @@ skip_lpage:
 #endif /* not defined CONFIG_S390 */
 
        if (!npages) {
+               struct kvm_memory_slot *slot;
+
                r = -ENOMEM;
-               slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+               slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+                               GFP_KERNEL);
                if (!slots)
                        goto out_free;
-               memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-               if (mem->slot >= slots->nmemslots)
-                       slots->nmemslots = mem->slot + 1;
-               slots->generation++;
-               slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+               slot = id_to_memslot(slots, mem->slot);
+               slot->flags |= KVM_MEMSLOT_INVALID;
+
+               update_memslots(slots, NULL);
 
                old_memslots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
@@ -810,13 +881,10 @@ skip_lpage:
        }
 
        r = -ENOMEM;
-       slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+       slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+                       GFP_KERNEL);
        if (!slots)
                goto out_free;
-       memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-       if (mem->slot >= slots->nmemslots)
-               slots->nmemslots = mem->slot + 1;
-       slots->generation++;
 
        /* actual memory is freed via old in kvm_free_physmem_slot below */
        if (!npages) {
@@ -826,7 +894,7 @@ skip_lpage:
                        new.lpage_info[i] = NULL;
        }
 
-       slots->memslots[mem->slot] = new;
+       update_memslots(slots, &new);
        old_memslots = kvm->memslots;
        rcu_assign_pointer(kvm->memslots, slots);
        synchronize_srcu_expedited(&kvm->srcu);
@@ -888,7 +956,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;
 
-       memslot = &kvm->memslots->memslots[log->slot];
+       memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;
@@ -966,16 +1034,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
                                                gfn_t gfn)
 {
-       int i;
-
-       for (i = 0; i < slots->nmemslots; ++i) {
-               struct kvm_memory_slot *memslot = &slots->memslots[i];
-
-               if (gfn >= memslot->base_gfn
-                   && gfn < memslot->base_gfn + memslot->npages)
-                       return memslot;
-       }
-       return NULL;
+       return search_memslots(slots, gfn);
 }
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
@@ -986,20 +1045,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
-       int i;
-       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
 
-       for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-               struct kvm_memory_slot *memslot = &slots->memslots[i];
-
-               if (memslot->flags & KVM_MEMSLOT_INVALID)
-                       continue;
+       if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
+             memslot->flags & KVM_MEMSLOT_INVALID)
+               return 0;
 
-               if (gfn >= memslot->base_gfn
-                   && gfn < memslot->base_gfn + memslot->npages)
-                       return 1;
-       }
-       return 0;
+       return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
@@ -1491,7 +1543,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-               __set_bit_le(rel_gfn, memslot->dirty_bitmap);
+               if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
+                       memslot->nr_dirty_pages++;
        }
 }
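Using __test_and_set_bit_le() instead of __set_bit_le() means the counter is bumped only on a 0 -> 1 transition, so nr_dirty_pages counts distinct dirty pages, not dirtying events. For instance:

	mark_page_dirty_in_slot(kvm, memslot, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);	/* same page again */
	/* nr_dirty_pages has gone up by exactly 1, not 2 */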
 
@@ -1690,10 +1743,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);
 
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-       if (kvm->bsp_vcpu_id == id)
-               kvm->bsp_vcpu = vcpu;
-#endif
        mutex_unlock(&kvm->lock);
        return r;
 
@@ -1768,12 +1817,11 @@ out_free1:
                struct kvm_regs *kvm_regs;
 
                r = -ENOMEM;
-               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
-               if (!kvm_regs)
+               kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
+               if (IS_ERR(kvm_regs)) {
+                       r = PTR_ERR(kvm_regs);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
-                       goto out_free2;
+               }
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
@@ -1797,13 +1845,11 @@ out_free2:
                break;
        }
        case KVM_SET_SREGS: {
-               kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!kvm_sregs)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
+               kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
+               if (IS_ERR(kvm_sregs)) {
+                       r = PTR_ERR(kvm_sregs);
                        goto out;
+               }
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
@@ -1899,13 +1945,11 @@ out_free2:
                break;
        }
        case KVM_SET_FPU: {
-               fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!fpu)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
+               fpu = memdup_user(argp, sizeof(*fpu));
+               if (IS_ERR(fpu)) {
+                       r = PTR_ERR(fpu);
                        goto out;
+               }
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
                if (r)
                        goto out;
@@ -2520,10 +2564,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
        if (bus->dev_count > NR_IOBUS_DEVS-1)
                return -ENOSPC;
 
-       new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+       new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
-       memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
        kvm_io_bus_insert_dev(new_bus, dev, addr, len);
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
@@ -2539,13 +2582,12 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        int i, r;
        struct kvm_io_bus *new_bus, *bus;
 
-       new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+       bus = kvm->buses[bus_idx];
+
+       new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
 
-       bus = kvm->buses[bus_idx];
-       memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
-
        r = -ENOENT;
        for (i = 0; i < new_bus->dev_count; i++)
                if (new_bus->range[i].dev == dev) {
@@ -2612,15 +2654,29 @@ static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VM]   = &vm_stat_fops,
 };
 
-static void kvm_init_debug(void)
+static int kvm_init_debug(void)
 {
+       int r = -EFAULT;
        struct kvm_stats_debugfs_item *p;
 
        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
-       for (p = debugfs_entries; p->name; ++p)
+       if (kvm_debugfs_dir == NULL)
+               goto out;
+
+       for (p = debugfs_entries; p->name; ++p) {
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
+               if (p->dentry == NULL)
+                       goto out_dir;
+       }
+
+       return 0;
+
+out_dir:
+       debugfs_remove_recursive(kvm_debugfs_dir);
+out:
+       return r;
 }
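kvm_init_debug() now reports failure instead of silently leaving a partial tree, and kvm_init() unwinds through the new out_undebugfs label below. debugfs_remove_recursive() keeps the unwind simple because it tears down everything under the directory regardless of how far creation got. The general pattern, as a hedged sketch (demo names and fops are hypothetical):

	dir = debugfs_create_dir("demo", NULL);
	if (!dir)
		return -ENOMEM;

	if (!debugfs_create_file("stat", 0444, dir, NULL, &demo_fops)) {
		debugfs_remove_recursive(dir);	/* removes "demo" and children */
		return -ENOMEM;
	}
	return 0;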
 
 static void kvm_exit_debug(void)
@@ -2764,10 +2820,16 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
 
-       kvm_init_debug();
+       r = kvm_init_debug();
+       if (r) {
+               printk(KERN_ERR "kvm: create debugfs files failed\n");
+               goto out_undebugfs;
+       }
 
        return 0;
 
+out_undebugfs:
+       unregister_syscore_ops(&kvm_syscore_ops);
 out_unreg:
        kvm_async_pf_deinit();
 out_free: