Merge tag 'please-pull-vm_unwrapped' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
Mon, 25 Feb 2013 23:47:03 +0000 (15:47 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Mon, 25 Feb 2013 23:47:03 +0000 (15:47 -0800)
Pull ia64 update from Tony Luck:
 "ia64 vm patch series that was cooking in -mm tree"

* tag 'please-pull-vm_unwrapped' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  mm: use vm_unmapped_area() in hugetlbfs on ia64 architecture
  mm: use vm_unmapped_area() on ia64 architecture

arch/ia64/kernel/sys_ia64.c
arch/ia64/mm/hugetlbpage.c
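
Both patches replace an open-coded linear walk of the VMA list with a call to vm_unmapped_area(), the generic gap-search helper added in the v3.8 cycle. For reference (not part of this diff), the interface they convert to looks roughly like this in include/linux/mm.h at the time of the merge:

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
        unsigned long flags;
        unsigned long length;
        unsigned long low_limit;
        unsigned long high_limit;
        unsigned long align_mask;
        unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range that does not intersect any VMA,
 * fits within the [low_limit, high_limit) interval, is at least length
 * bytes, and satisfies (addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
        if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
                return unmapped_area(info);
        else
                return unmapped_area_topdown(info);
}

Both ia64 callers pass flags == 0, i.e. a bottom-up search in the same direction as the loops they remove.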

diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index d9439ef..41e33f8 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
                        unsigned long pgoff, unsigned long flags)
 {
        long map_shared = (flags & MAP_SHARED);
-       unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+       unsigned long align_mask = 0;
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
 
        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
                addr = 0;
 #endif
        if (!addr)
-               addr = mm->free_area_cache;
+               addr = TASK_UNMAPPED_BASE;
 
        if (map_shared && (TASK_SIZE > 0xfffffffful))
                /*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
                 * tasks, we prefer to avoid exhausting the address space too quickly by
                 * limiting alignment to a single page.
                 */
-               align_mask = SHMLBA - 1;
-
-  full_search:
-       start_addr = addr = (addr + align_mask) & ~align_mask;
-
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
-                       if (start_addr != TASK_UNMAPPED_BASE) {
-                               /* Start a new search --- just in case we missed some holes.  */
-                               addr = TASK_UNMAPPED_BASE;
-                               goto full_search;
-                       }
-                       return -ENOMEM;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /* Remember the address where we stopped this search:  */
-                       mm->free_area_cache = addr + len;
-                       return addr;
-               }
-               addr = (vma->vm_end + align_mask) & ~align_mask;
-       }
+               align_mask = PAGE_MASK & (SHMLBA - 1);
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = addr;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = align_mask;
+       info.align_offset = 0;
+       return vm_unmapped_area(&info);
 }
 
 asmlinkage long
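
The one subtle point in the sys_ia64.c hunk above is the mask: the removed full_search loop rounded each candidate up with (addr + align_mask) & ~align_mask, while the generic helper aligns a gap with roughly gap_start += (align_offset - gap_start) & align_mask. The user-space sketch below (the 64KB PAGE_SIZE and 1MB SHMLBA are assumed values for a common ia64 configuration, not taken from this diff) checks that the two forms agree for page-aligned candidates:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      16                      /* assumed 64KB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define SHMLBA          (1024UL * 1024)         /* assumed 1MB SHMLBA */

int main(void)
{
        unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);   /* 0xf0000 with these values */
        unsigned long base = 16 * SHMLBA;       /* arbitrary SHMLBA-aligned starting point */
        unsigned long addr;

        for (addr = base; addr < base + 4 * SHMLBA; addr += PAGE_SIZE) {
                /* Rounding done by the removed full_search loop. */
                unsigned long old_way = (addr + align_mask) & ~align_mask;
                /* Alignment step the generic helper applies, with align_offset == 0. */
                unsigned long new_way = addr + ((0UL - addr) & align_mask);

                assert(old_way == new_way);
                /* Page-aligned input plus this mask gives an SHMLBA-aligned result. */
                assert((new_way & (SHMLBA - 1)) == 0);
        }
        printf("old and new rounding agree for %lu page-aligned candidates\n",
               4 * SHMLBA / PAGE_SIZE);
        return 0;
}

The PAGE_MASK in the new mask simply clears the sub-page bits of SHMLBA - 1, so only page-granular alignment is requested of the helper.
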
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 5ca674b..76069c1 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
-       struct vm_area_struct *vmm;
+       struct vm_unmapped_area_info info;
 
        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
@@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
        /* This code assumes that RGN_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;
-       else
-               addr = ALIGN(addr, HPAGE_SIZE);
-       for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-               /* At this point:  (!vmm || addr < vmm->vm_end). */
-               if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
-                       return -ENOMEM;
-               if (!vmm || (addr + len) <= vmm->vm_start)
-                       return addr;
-               addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
-       }
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = addr;
+       info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+       info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+       info.align_offset = 0;
+       return vm_unmapped_area(&info);
 }
 
 static int __init hugetlb_setup_sz(char *str)
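
The hugetlb conversion follows the same pattern, with the end of the huge-page region as high_limit and huge-page alignment in align_mask. The sketch below is a rough user-space model of the bottom-up, lowest-fit contract the caller now relies on; it is not the rbtree-based unmapped_area() from mm/mmap.c, and the region constants and mappings are made up for illustration:

#include <stdio.h>

/* Hypothetical stand-ins for the ia64 constants the patch uses. */
#define PAGE_SIZE               (1UL << 16)             /* assumed 64KB pages */
#define PAGE_MASK               (~(PAGE_SIZE - 1))
#define HPAGE_SIZE              (4UL << 20)             /* assumed 4MB huge pages */
#define HPAGE_REGION_BASE       (0x100UL << 20)         /* made-up region start */
#define RGN_MAP_LIMIT           (0x400UL << 20)         /* made-up region size */

struct range { unsigned long start, end; };     /* existing mappings, sorted by start */

struct area_info {
        unsigned long length, low_limit, high_limit;
        unsigned long align_mask, align_offset;
};

/* Bottom-up, lowest-fit search over the gaps between mappings, modelled on
 * the contract vm_unmapped_area() gives its callers. */
static unsigned long find_area(const struct area_info *info,
                               const struct range *vmas, int nr)
{
        unsigned long addr = info->low_limit;
        int i;

        for (i = 0; i <= nr; i++) {
                unsigned long gap_end = (i < nr) ? vmas[i].start : info->high_limit;

                /* Round the candidate up to the requested alignment. */
                addr += (info->align_offset - addr) & info->align_mask;
                if (gap_end > addr && gap_end - addr >= info->length &&
                    addr + info->length <= info->high_limit)
                        return addr;
                if (i < nr && vmas[i].end > addr)
                        addr = vmas[i].end;     /* skip past this mapping */
        }
        return -12UL;                           /* -ENOMEM, as the kernel reports it */
}

int main(void)
{
        /* One existing huge-page mapping sitting at the region base. */
        struct range vmas[] = {
                { HPAGE_REGION_BASE, HPAGE_REGION_BASE + HPAGE_SIZE },
        };
        struct area_info info = {
                .length       = 2 * HPAGE_SIZE,
                .low_limit    = HPAGE_REGION_BASE,
                .high_limit   = HPAGE_REGION_BASE + RGN_MAP_LIMIT,
                .align_mask   = PAGE_MASK & (HPAGE_SIZE - 1),
                .align_offset = 0,
        };

        /* Prints HPAGE_REGION_BASE + HPAGE_SIZE: the first aligned gap that fits. */
        printf("placed at 0x%lx\n", find_area(&info, vmas, 1));
        return 0;
}

With the single mapping at the region base, the search skips to the next huge-page boundary and places the request there, staying below the high_limit bound that replaces the old explicit RGN_MAP_LIMIT check.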