Merge remote-tracking branch 'origin/x86/boot' into x86/mm2
[~shefty/rdma-dev.git] / arch / x86 / kernel / cpu / amd.c
index 913f94f..eafb084 100644 (file)
@@ -303,7 +303,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
        int cpu = smp_processor_id();
 
        /* get information required for multi-node processors */
-       if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+       if (cpu_has_topoext) {
                u32 eax, ebx, ecx, edx;
 
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -630,6 +630,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                }
        }
 
+       /*
+        * The CPU's "way access filter" can incur a performance penalty
+        * on some workloads. Disable it on the affected CPUs.
+        */
+       if ((c->x86 == 0x15) &&
+           (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+               u64 val;
+
+               if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+                       val |= 0x1E;
+                       wrmsrl_safe(0xc0011021, val);
+               }
+       }
+
        cpu_detect_cache_sizes(c);
 
        /* Multi core CPU? */
@@ -642,12 +656,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
        detect_ht(c);
 #endif
 
-       if (c->extended_cpuid_level >= 0x80000006) {
-               if (cpuid_edx(0x80000006) & 0xf000)
-                       num_cache_leaves = 4;
-               else
-                       num_cache_leaves = 3;
-       }
+       init_amd_cacheinfo(c);
 
        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);
@@ -736,9 +745,6 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
 
 static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
-       if (!cpu_has_invlpg)
-               return;
-
        tlb_flushall_shift = 5;
 
        if (c->x86 <= 0x11)