Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 27 Feb 2013 03:45:29 +0000 (19:45 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 27 Feb 2013 03:45:29 +0000 (19:45 -0800)
Pull x86 fixes from Ingo Molnar.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/pageattr: Prevent PSE and GLOBAL leftovers from confusing pmd/pte_present and pmd_huge
  Revert "x86, mm: Make spurious_fault check explicitly check explicitly check the PRESENT bit"
  x86/mm/numa: Don't check if node is NUMA_NO_NODE
  x86, efi: Make "noefi" really disable EFI runtime serivces
  x86/apic: Fix parsing of the 'lapic' cmdline option

arch/x86/kernel/apic/apic.c
arch/x86/mm/fault.c
arch/x86/mm/numa.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi.c

arch/x86/kernel/apic/apic.c
index a5b4dce..904611b 100644
@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
 {
        if (config_enabled(CONFIG_X86_32) && !arg)
                force_enable_local_apic = 1;
-       else if (!strncmp(arg, "notscdeadline", 13))
+       else if (arg && !strncmp(arg, "notscdeadline", 13))
                setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
        return 0;
 }
arch/x86/mm/fault.c
index fb674fd..2b97525 100644
@@ -939,14 +939,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);
 
-       /*
-        * Note: don't use pte_present() here, since it returns true
-        * if the _PAGE_PROTNONE bit is set.  However, this aliases the
-        * _PAGE_GLOBAL bit, which for kernel pages give false positives
-        * when CONFIG_DEBUG_PAGEALLOC is used.
-        */
        pte = pte_offset_kernel(pmd, address);
-       if (!(pte_flags(*pte) & _PAGE_PRESENT))
+       if (!pte_present(*pte))
                return 0;
 
        ret = spurious_fault_check(error_code, pte);
arch/x86/mm/numa.c
index dfd3025..ff3633c 100644
@@ -97,8 +97,7 @@ void numa_set_node(int cpu, int node)
 #endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;
 
-       if (node != NUMA_NO_NODE)
-               set_cpu_numa_node(cpu, node);
+       set_cpu_numa_node(cpu, node);
 }
 
 void numa_clear_node(int cpu)
arch/x86/mm/pageattr.c
index ca1f1c2..091934e 100644
@@ -472,6 +472,19 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
+       /*
+        * Set the PSE and GLOBAL flags only if the PRESENT flag is
+        * set, otherwise pmd_present/pmd_huge will return true even
+        * on a non-present pmd. canon_pgprot() will clear _PAGE_GLOBAL
+        * for ancient hardware that doesn't support it.
+        */
+       if (pgprot_val(new_prot) & _PAGE_PRESENT)
+               pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+       else
+               pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+
+       new_prot = canon_pgprot(new_prot);
+
        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
@@ -517,7 +530,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
-               new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
+               new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
@@ -561,16 +574,35 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
 #ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
-               pgprot_val(ref_prot) |= _PAGE_PSE;
+               /*
+                * Set the PSE flag only if the PRESENT flag is set,
+                * otherwise pmd_present/pmd_huge will return true
+                * even on a non-present pmd.
+                */
+               if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+                       pgprot_val(ref_prot) |= _PAGE_PSE;
+               else
+                       pgprot_val(ref_prot) &= ~_PAGE_PSE;
        }
 #endif
 
+       /*
+        * Set the GLOBAL flag only if the PRESENT flag is set,
+        * otherwise pmd/pte_present will return true even on a
+        * non-present pmd/pte. canon_pgprot() will clear _PAGE_GLOBAL
+        * for ancient hardware that doesn't support it.
+        */
+       if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+               pgprot_val(ref_prot) |= _PAGE_GLOBAL;
+       else
+               pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
+
        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
-               set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+               set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
 
        if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
                                PFN_DOWN(__pa(address)) + 1))
@@ -684,6 +716,18 @@ repeat:
 
                new_prot = static_protections(new_prot, address, pfn);
 
+               /*
+                * Set the GLOBAL flag only if the PRESENT flag is
+                * set, otherwise pte_present will return true even on
+                * a non-present pte. canon_pgprot() will clear
+                * _PAGE_GLOBAL for ancient hardware that doesn't
+                * support it.
+                */
+               if (pgprot_val(new_prot) & _PAGE_PRESENT)
+                       pgprot_val(new_prot) |= _PAGE_GLOBAL;
+               else
+                       pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
+
                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes
arch/x86/platform/efi/efi.c
index 70b2a3a..2f81db4 100644
@@ -85,9 +85,10 @@ int efi_enabled(int facility)
 }
 EXPORT_SYMBOL(efi_enabled);
 
+static bool disable_runtime = false;
 static int __init setup_noefi(char *arg)
 {
-       clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+       disable_runtime = true;
        return 0;
 }
 early_param("noefi", setup_noefi);
@@ -734,7 +735,7 @@ void __init efi_init(void)
        if (!efi_is_native())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
        else {
-               if (efi_runtime_init())
+               if (disable_runtime || efi_runtime_init())
                        return;
                set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
        }