2 * linux/mm/page_alloc.c
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
17 #include <linux/stddef.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/module.h>
29 #include <linux/suspend.h>
30 #include <linux/pagevec.h>
31 #include <linux/blkdev.h>
32 #include <linux/slab.h>
33 #include <linux/ratelimit.h>
34 #include <linux/oom.h>
35 #include <linux/notifier.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/stop_machine.h>
46 #include <linux/sort.h>
47 #include <linux/pfn.h>
48 #include <linux/backing-dev.h>
49 #include <linux/fault-inject.h>
50 #include <linux/page-isolation.h>
51 #include <linux/page_cgroup.h>
52 #include <linux/debugobjects.h>
53 #include <linux/kmemleak.h>
54 #include <linux/compaction.h>
55 #include <trace/events/kmem.h>
56 #include <linux/ftrace_event.h>
57 #include <linux/memcontrol.h>
58 #include <linux/prefetch.h>
59 #include <linux/migrate.h>
60 #include <linux/page-debug-flags.h>
62 #include <asm/tlbflush.h>
63 #include <asm/div64.h>
66 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
67 DEFINE_PER_CPU(int, numa_node);
68 EXPORT_PER_CPU_SYMBOL(numa_node);
71 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
73 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
74 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
75 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
76 * defined in <linux/topology.h>.
78 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
79 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
83 * Array of node states.
85 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
86 [N_POSSIBLE] = NODE_MASK_ALL,
87 [N_ONLINE] = { { [0] = 1UL } },
89 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
91 [N_HIGH_MEMORY] = { { [0] = 1UL } },
93 #ifdef CONFIG_MOVABLE_NODE
94 [N_MEMORY] = { { [0] = 1UL } },
96 [N_CPU] = { { [0] = 1UL } },
99 EXPORT_SYMBOL(node_states);
101 unsigned long totalram_pages __read_mostly;
102 unsigned long totalreserve_pages __read_mostly;
104 * When calculating the number of globally allowed dirty pages, there
105 * is a certain number of per-zone reserves that should not be
106 * considered dirtyable memory. This is the sum of those reserves
107 * over all existing zones that contribute dirtyable memory.
109 unsigned long dirty_balance_reserve __read_mostly;
111 int percpu_pagelist_fraction;
112 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
114 #ifdef CONFIG_PM_SLEEP
116 * The following functions are used by the suspend/hibernate code to temporarily
117 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
118 * while devices are suspended. To avoid races with the suspend/hibernate code,
119 * they should always be called with pm_mutex held (gfp_allowed_mask also should
120 * only be modified with pm_mutex held, unless the suspend/hibernate code is
121 * guaranteed not to run in parallel with that modification).
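/*
 * Illustrative, standalone sketch (not part of this file): what clearing
 * GFP_IOFS from gfp_allowed_mask means for a GFP_KERNEL allocation while
 * devices are suspended.  The bit values below are made-up stand-ins for
 * the real gfp flags; compile separately as ordinary userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int toy_gfp_wait = 0x1, toy_gfp_io = 0x2, toy_gfp_fs = 0x4;
	unsigned int toy_gfp_iofs = toy_gfp_io | toy_gfp_fs;
	unsigned int toy_gfp_kernel = toy_gfp_wait | toy_gfp_io | toy_gfp_fs;
	unsigned int allowed = ~0u;		/* normal operation */

	allowed &= ~toy_gfp_iofs;		/* what pm_restrict_gfp_mask() does */
	printf("GFP_KERNEL during suspend acts like %s\n",
	       (toy_gfp_kernel & allowed) == toy_gfp_wait ?
	       "GFP_NOIO (no I/O, no filesystem calls)" : "GFP_KERNEL");
	return 0;
}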
124 static gfp_t saved_gfp_mask;
126 void pm_restore_gfp_mask(void)
128 WARN_ON(!mutex_is_locked(&pm_mutex));
129 if (saved_gfp_mask) {
130 gfp_allowed_mask = saved_gfp_mask;
135 void pm_restrict_gfp_mask(void)
137 WARN_ON(!mutex_is_locked(&pm_mutex));
138 WARN_ON(saved_gfp_mask);
139 saved_gfp_mask = gfp_allowed_mask;
140 gfp_allowed_mask &= ~GFP_IOFS;
143 bool pm_suspended_storage(void)
145 if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
149 #endif /* CONFIG_PM_SLEEP */
151 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
152 int pageblock_order __read_mostly;
155 static void __free_pages_ok(struct page *page, unsigned int order);
158 * results with 256, 32 in the lowmem_reserve sysctl:
159 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
160 * 1G machine -> (16M dma, 784M normal, 224M high)
161 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
162 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
163 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
165 * TBD: should special case ZONE_DMA32 machines here - in those we normally
166 * don't need any ZONE_NORMAL reservation
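/*
 * Illustrative, standalone sketch (not part of this file): how the ratios in
 * sysctl_lowmem_reserve_ratio below turn into per-zone reserves, mirroring
 * what setup_per_zone_lowmem_reserve() computes from the sizes of the higher
 * zones.  The zone sizes are the round numbers from the 1G example above;
 * compile separately as ordinary userspace C.
 */
#include <stdio.h>

int main(void)
{
	/* pages (4K) per zone for the example 1G machine: 16M/784M/224M */
	unsigned long normal_pages = 784UL * 1024 / 4;
	unsigned long high_pages   = 224UL * 1024 / 4;

	/* ratio[ZONE_DMA] = 256, ratio[ZONE_NORMAL] = 32, as set up below */
	unsigned long dma_keeps    = (normal_pages + high_pages) / 256;
	unsigned long normal_keeps = high_pages / 32;

	printf("DMA holds back %lu pages (~%luM) from HIGHMEM allocations\n",
	       dma_keeps, dma_keeps * 4 / 1024);
	printf("NORMAL holds back %lu pages (~%luM) from HIGHMEM allocations\n",
	       normal_keeps, normal_keeps * 4 / 1024);
	return 0;
}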
168 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
169 #ifdef CONFIG_ZONE_DMA
172 #ifdef CONFIG_ZONE_DMA32
175 #ifdef CONFIG_HIGHMEM
181 EXPORT_SYMBOL(totalram_pages);
183 static char * const zone_names[MAX_NR_ZONES] = {
184 #ifdef CONFIG_ZONE_DMA
187 #ifdef CONFIG_ZONE_DMA32
191 #ifdef CONFIG_HIGHMEM
197 int min_free_kbytes = 1024;
199 static unsigned long __meminitdata nr_kernel_pages;
200 static unsigned long __meminitdata nr_all_pages;
201 static unsigned long __meminitdata dma_reserve;
203 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
204 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
205 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
206 static unsigned long __initdata required_kernelcore;
207 static unsigned long __initdata required_movablecore;
208 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
210 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
212 EXPORT_SYMBOL(movable_zone);
213 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
216 int nr_node_ids __read_mostly = MAX_NUMNODES;
217 int nr_online_nodes __read_mostly = 1;
218 EXPORT_SYMBOL(nr_node_ids);
219 EXPORT_SYMBOL(nr_online_nodes);
222 int page_group_by_mobility_disabled __read_mostly;
226 * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
227 * Instead, use {un}set_pageblock_isolate.
229 void set_pageblock_migratetype(struct page *page, int migratetype)
232 if (unlikely(page_group_by_mobility_disabled))
233 migratetype = MIGRATE_UNMOVABLE;
235 set_pageblock_flags_group(page, (unsigned long)migratetype,
236 PB_migrate, PB_migrate_end);
239 bool oom_killer_disabled __read_mostly;
241 #ifdef CONFIG_DEBUG_VM
242 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
246 unsigned long pfn = page_to_pfn(page);
249 seq = zone_span_seqbegin(zone);
250 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
252 else if (pfn < zone->zone_start_pfn)
254 } while (zone_span_seqretry(zone, seq));
259 static int page_is_consistent(struct zone *zone, struct page *page)
261 if (!pfn_valid_within(page_to_pfn(page)))
263 if (zone != page_zone(page))
269 * Temporary debugging check for pages not lying within a given zone.
271 static int bad_range(struct zone *zone, struct page *page)
273 if (page_outside_zone_boundaries(zone, page))
275 if (!page_is_consistent(zone, page))
281 static inline int bad_range(struct zone *zone, struct page *page)
287 static void bad_page(struct page *page)
289 static unsigned long resume;
290 static unsigned long nr_shown;
291 static unsigned long nr_unshown;
293 /* Don't complain about poisoned pages */
294 if (PageHWPoison(page)) {
295 reset_page_mapcount(page); /* remove PageBuddy */
300 * Allow a burst of 60 reports, then keep quiet for that minute;
301 * or allow a steady drip of one report per second.
303 if (nr_shown == 60) {
304 if (time_before(jiffies, resume)) {
310 "BUG: Bad page state: %lu messages suppressed\n",
317 resume = jiffies + 60 * HZ;
319 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
320 current->comm, page_to_pfn(page));
326 /* Leave bad fields for debug, except PageBuddy could make trouble */
327 reset_page_mapcount(page); /* remove PageBuddy */
328 add_taint(TAINT_BAD_PAGE);
332 * Higher-order pages are called "compound pages". They are structured thusly:
334 * The first PAGE_SIZE page is called the "head page".
336 * The remaining PAGE_SIZE pages are called "tail pages".
338 * All pages have PG_compound set. All tail pages have their ->first_page
339 * pointing at the head page.
341 * The first tail page's ->lru.next holds the address of the compound page's
342 * put_page() function. Its ->lru.prev holds the order of allocation.
343 * This usage means that zero-order pages may not be compound.
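/*
 * Illustrative, standalone sketch (not part of this file): a toy model of
 * the head/tail layout described above.  'struct toy_page' and the helper
 * are invented for the example; the real code uses struct page and the
 * prep_compound_page()/set_compound_order() helpers below.
 */
#include <stdio.h>

struct toy_page {
	struct toy_page *first_page;	/* tail pages point at the head */
	unsigned long order;		/* only meaningful on the head  */
};

static void toy_prep_compound(struct toy_page *pages, unsigned long order)
{
	unsigned long i, nr = 1UL << order;

	pages[0].order = order;			/* head records the order    */
	for (i = 1; i < nr; i++)
		pages[i].first_page = &pages[0];/* every tail points at head */
}

int main(void)
{
	struct toy_page block[8] = { { 0 } };

	toy_prep_compound(block, 3);	/* one order-3 "compound page" */
	printf("tail #5 resolves to a head of order %lu\n",
	       block[5].first_page->order);
	return 0;
}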
346 static void free_compound_page(struct page *page)
348 __free_pages_ok(page, compound_order(page));
351 void prep_compound_page(struct page *page, unsigned long order)
354 int nr_pages = 1 << order;
356 set_compound_page_dtor(page, free_compound_page);
357 set_compound_order(page, order);
359 for (i = 1; i < nr_pages; i++) {
360 struct page *p = page + i;
362 set_page_count(p, 0);
363 p->first_page = page;
367 /* update __split_huge_page_refcount if you change this function */
368 static int destroy_compound_page(struct page *page, unsigned long order)
371 int nr_pages = 1 << order;
374 if (unlikely(compound_order(page) != order)) {
379 __ClearPageHead(page);
381 for (i = 1; i < nr_pages; i++) {
382 struct page *p = page + i;
384 if (unlikely(!PageTail(p) || (p->first_page != page))) {
394 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
399 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
400 * and __GFP_HIGHMEM from hard or soft interrupt context.
402 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
403 for (i = 0; i < (1 << order); i++)
404 clear_highpage(page + i);
407 #ifdef CONFIG_DEBUG_PAGEALLOC
408 unsigned int _debug_guardpage_minorder;
410 static int __init debug_guardpage_minorder_setup(char *buf)
414 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
415 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
418 _debug_guardpage_minorder = res;
419 printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
422 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
424 static inline void set_page_guard_flag(struct page *page)
426 __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
429 static inline void clear_page_guard_flag(struct page *page)
431 __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
434 static inline void set_page_guard_flag(struct page *page) { }
435 static inline void clear_page_guard_flag(struct page *page) { }
438 static inline void set_page_order(struct page *page, int order)
440 set_page_private(page, order);
441 __SetPageBuddy(page);
444 static inline void rmv_page_order(struct page *page)
446 __ClearPageBuddy(page);
447 set_page_private(page, 0);
451 * Locate the struct page for both the matching buddy in our
452 * pair (buddy1) and the combined O(n+1) page they form (page).
454 * 1) Any buddy B1 will have an order O twin B2 which satisfies
455 * the following equation:
457 * For example, if the starting buddy (buddy2) is #8, its order-1 buddy is #10:
459 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
461 * 2) Any buddy B will have an order O+1 parent P which
462 * satisfies the following equation:
465 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
467 static inline unsigned long
468 __find_buddy_index(unsigned long page_idx, unsigned int order)
470 return page_idx ^ (1 << order);
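/*
 * Worked example (standalone sketch, not part of this file) of the XOR
 * above, using the #8/order-1 case from the comment, plus the combined
 * index of the merged order-2 block.  Compile separately as userspace C.
 */
#include <stdio.h>

static unsigned long toy_find_buddy(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned long page_idx = 8;
	unsigned int order = 1;
	unsigned long buddy_idx = toy_find_buddy(page_idx, order);	/* 10 */
	unsigned long combined  = buddy_idx & page_idx;			/* 8  */

	printf("buddy of %lu at order %u is %lu; merged block starts at %lu\n",
	       page_idx, order, buddy_idx, combined);
	return 0;
}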
474 * This function checks whether a page is free && is the buddy
475 * we can coalesce with. A page and its buddy can be coalesced if
476 * (a) the buddy is not in a hole &&
477 * (b) the buddy is in the buddy system &&
478 * (c) a page and its buddy have the same order &&
479 * (d) a page and its buddy are in the same zone.
481 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
482 * Setting, clearing, and testing _mapcount against -2 is serialized by zone->lock.
484 * For recording page's order, we use page_private(page).
486 static inline int page_is_buddy(struct page *page, struct page *buddy,
489 if (!pfn_valid_within(page_to_pfn(buddy)))
492 if (page_zone_id(page) != page_zone_id(buddy))
495 if (page_is_guard(buddy) && page_order(buddy) == order) {
496 VM_BUG_ON(page_count(buddy) != 0);
500 if (PageBuddy(buddy) && page_order(buddy) == order) {
501 VM_BUG_ON(page_count(buddy) != 0);
508 * Freeing function for a buddy system allocator.
510 * The concept of a buddy system is to maintain a direct-mapped table
511 * (containing bit values) for memory blocks of various "orders".
512 * The bottom level table contains the map for the smallest allocatable
513 * units of memory (here, pages), and each level above it describes
514 * pairs of units from the levels below, hence, "buddies".
515 * At a high level, all that happens here is marking the table entry
516 * at the bottom level available, and propagating the changes upward
517 * as necessary, plus some accounting needed to play nicely with other
518 * parts of the VM system.
519 * At each level, we keep a list of pages, which are heads of contiguous
520 * free pages of length (1 << order), marked with _mapcount -2. The page's
521 * order is recorded in the page_private(page) field.
522 * So when we are allocating or freeing one, we can derive the state of the
523 * other. That is, if we allocate a small block, and both were
524 * free, the remainder of the region must be split into blocks.
525 * If a block is freed, and its buddy is also free, then this
526 * triggers coalescing into a block of larger size.
531 static inline void __free_one_page(struct page *page,
532 struct zone *zone, unsigned int order,
535 unsigned long page_idx;
536 unsigned long combined_idx;
537 unsigned long uninitialized_var(buddy_idx);
540 if (unlikely(PageCompound(page)))
541 if (unlikely(destroy_compound_page(page, order)))
544 VM_BUG_ON(migratetype == -1);
546 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
548 VM_BUG_ON(page_idx & ((1 << order) - 1));
549 VM_BUG_ON(bad_range(zone, page));
551 while (order < MAX_ORDER-1) {
552 buddy_idx = __find_buddy_index(page_idx, order);
553 buddy = page + (buddy_idx - page_idx);
554 if (!page_is_buddy(page, buddy, order))
557 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
558 * merge with it and move up one order.
560 if (page_is_guard(buddy)) {
561 clear_page_guard_flag(buddy);
562 set_page_private(page, 0);
563 __mod_zone_freepage_state(zone, 1 << order,
566 list_del(&buddy->lru);
567 zone->free_area[order].nr_free--;
568 rmv_page_order(buddy);
570 combined_idx = buddy_idx & page_idx;
571 page = page + (combined_idx - page_idx);
572 page_idx = combined_idx;
575 set_page_order(page, order);
578 * If this is not the largest possible page, check if the buddy
579 * of the next-highest order is free. If it is, it's possible
580 * that pages are being freed that will coalesce soon. In case
581 * that is happening, add the free page to the tail of the list
582 * so it's less likely to be used soon and more likely to be merged
583 * as a higher order page
585 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
586 struct page *higher_page, *higher_buddy;
587 combined_idx = buddy_idx & page_idx;
588 higher_page = page + (combined_idx - page_idx);
589 buddy_idx = __find_buddy_index(combined_idx, order + 1);
590 higher_buddy = higher_page + (buddy_idx - combined_idx);
591 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
592 list_add_tail(&page->lru,
593 &zone->free_area[order].free_list[migratetype]);
598 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
600 zone->free_area[order].nr_free++;
603 static inline int free_pages_check(struct page *page)
605 if (unlikely(page_mapcount(page) |
606 (page->mapping != NULL) |
607 (atomic_read(&page->_count) != 0) |
608 (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
609 (mem_cgroup_bad_page_check(page)))) {
613 reset_page_last_nid(page);
614 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
615 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
620 * Frees a number of pages from the PCP lists
621 * Assumes all pages on list are in same zone, and of same order.
622 * count is the number of pages to free.
624 * If the zone was previously in an "all pages pinned" state then look to
625 * see if this freeing clears that state.
627 * And clear the zone's pages_scanned counter, to hold off the "all pages are
628 * pinned" detection logic.
630 static void free_pcppages_bulk(struct zone *zone, int count,
631 struct per_cpu_pages *pcp)
637 spin_lock(&zone->lock);
638 zone->all_unreclaimable = 0;
639 zone->pages_scanned = 0;
643 struct list_head *list;
646 * Remove pages from lists in a round-robin fashion. A
647 * batch_free count is maintained that is incremented when an
648 * empty list is encountered. This is so more pages are freed
649 * off fuller lists instead of spinning excessively around empty lists; a standalone sketch of this round-robin appears after this function.
654 if (++migratetype == MIGRATE_PCPTYPES)
656 list = &pcp->lists[migratetype];
657 } while (list_empty(list));
659 /* This is the only non-empty list. Free them all. */
660 if (batch_free == MIGRATE_PCPTYPES)
661 batch_free = to_free;
664 int mt; /* migratetype of the to-be-freed page */
666 page = list_entry(list->prev, struct page, lru);
667 /* must delete as __free_one_page list manipulates */
668 list_del(&page->lru);
669 mt = get_freepage_migratetype(page);
670 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
671 __free_one_page(page, zone, 0, mt);
672 trace_mm_page_pcpu_drain(page, 0, mt);
673 if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) {
674 __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
675 if (is_migrate_cma(mt))
676 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
678 } while (--to_free && --batch_free && !list_empty(list));
680 spin_unlock(&zone->lock);
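/*
 * Illustrative, standalone sketch (not part of this file) of the
 * round-robin drain in free_pcppages_bulk() above.  Three toy per-cpu
 * lists are drained a batch at a time; batch_free grows each time an
 * empty list is met so fuller lists are not starved.  The page counts
 * are invented; compile separately as userspace C.
 */
#include <stdio.h>

int main(void)
{
	int lists[3] = { 5, 0, 2 };	/* UNMOVABLE / RECLAIMABLE / MOVABLE */
	int to_free = 7, migratetype = 0, batch_free;

	while (to_free) {
		batch_free = 0;
		do {				/* find the next non-empty list */
			batch_free++;
			migratetype = (migratetype + 1) % 3;
		} while (lists[migratetype] == 0);

		do {				/* free one batch from it */
			lists[migratetype]--;
			to_free--;
			printf("freed one page from list %d\n", migratetype);
		} while (--batch_free && to_free && lists[migratetype]);
	}
	return 0;
}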
683 static void free_one_page(struct zone *zone, struct page *page, int order,
686 spin_lock(&zone->lock);
687 zone->all_unreclaimable = 0;
688 zone->pages_scanned = 0;
690 __free_one_page(page, zone, order, migratetype);
691 if (unlikely(migratetype != MIGRATE_ISOLATE))
692 __mod_zone_freepage_state(zone, 1 << order, migratetype);
693 spin_unlock(&zone->lock);
696 static bool free_pages_prepare(struct page *page, unsigned int order)
701 trace_mm_page_free(page, order);
702 kmemcheck_free_shadow(page, order);
705 page->mapping = NULL;
706 for (i = 0; i < (1 << order); i++)
707 bad += free_pages_check(page + i);
711 if (!PageHighMem(page)) {
712 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
713 debug_check_no_obj_freed(page_address(page),
716 arch_free_page(page, order);
717 kernel_map_pages(page, 1 << order, 0);
722 static void __free_pages_ok(struct page *page, unsigned int order)
727 if (!free_pages_prepare(page, order))
730 local_irq_save(flags);
731 __count_vm_events(PGFREE, 1 << order);
732 migratetype = get_pageblock_migratetype(page);
733 set_freepage_migratetype(page, migratetype);
734 free_one_page(page_zone(page), page, order, migratetype);
735 local_irq_restore(flags);
739 * Read access to zone->managed_pages is safe because it's unsigned long,
740 * but we still need to serialize writers. Currently all callers of
741 * __free_pages_bootmem() except put_page_bootmem() are only used
742 * at boot time. So for shorter boot time, we shift the burden to
743 * put_page_bootmem() to serialize writers.
745 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
747 unsigned int nr_pages = 1 << order;
751 for (loop = 0; loop < nr_pages; loop++) {
752 struct page *p = &page[loop];
754 if (loop + 1 < nr_pages)
756 __ClearPageReserved(p);
757 set_page_count(p, 0);
760 page_zone(page)->managed_pages += 1 << order;
761 set_page_refcounted(page);
762 __free_pages(page, order);
766 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
767 void __init init_cma_reserved_pageblock(struct page *page)
769 unsigned i = pageblock_nr_pages;
770 struct page *p = page;
773 __ClearPageReserved(p);
774 set_page_count(p, 0);
777 set_page_refcounted(page);
778 set_pageblock_migratetype(page, MIGRATE_CMA);
779 __free_pages(page, pageblock_order);
780 totalram_pages += pageblock_nr_pages;
785 * The order of subdivision here is critical for the IO subsystem.
786 * Please do not alter this order without good reasons and regression
787 * testing. Specifically, as large blocks of memory are subdivided,
788 * the order in which smaller blocks are delivered depends on the order
789 * they're subdivided in this function. This is the primary factor
790 * influencing the order in which pages are delivered to the IO
791 * subsystem according to empirical testing, and this is also justified
792 * by considering the behavior of a buddy system containing a single
793 * large block of memory acted on by a series of small allocations.
794 * This behavior is a critical factor in sglist merging's success.
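/*
 * Illustrative, standalone sketch (not part of this file): how expand()
 * below carves an order-0 page out of an order-3 block, handing the upper
 * halves back to the order-2, order-1 and order-0 free lists in that
 * order.  Compile separately as userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int low = 0, high = 3;		/* want order 0, found order 3 */
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("free half at offset %lu, order %u (%lu pages)\n",
		       size, high, size);
	}
	printf("the page at offset 0 is returned to the caller\n");
	return 0;
}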
798 static inline void expand(struct zone *zone, struct page *page,
799 int low, int high, struct free_area *area,
802 unsigned long size = 1 << high;
808 VM_BUG_ON(bad_range(zone, &page[size]));
810 #ifdef CONFIG_DEBUG_PAGEALLOC
811 if (high < debug_guardpage_minorder()) {
813 * Mark as guard pages (or page); this allows them to be
814 * merged back into the allocator when the buddy is freed.
815 * Corresponding page table entries will not be touched,
816 * so the pages stay not-present in the virtual address space.
818 INIT_LIST_HEAD(&page[size].lru);
819 set_page_guard_flag(&page[size]);
820 set_page_private(&page[size], high);
821 /* Guard pages are not available for any usage */
822 __mod_zone_freepage_state(zone, -(1 << high),
827 list_add(&page[size].lru, &area->free_list[migratetype]);
829 set_page_order(&page[size], high);
834 * This page is about to be returned from the page allocator
836 static inline int check_new_page(struct page *page)
838 if (unlikely(page_mapcount(page) |
839 (page->mapping != NULL) |
840 (atomic_read(&page->_count) != 0) |
841 (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
842 (mem_cgroup_bad_page_check(page)))) {
849 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
853 for (i = 0; i < (1 << order); i++) {
854 struct page *p = page + i;
855 if (unlikely(check_new_page(p)))
859 set_page_private(page, 0);
860 set_page_refcounted(page);
862 arch_alloc_page(page, order);
863 kernel_map_pages(page, 1 << order, 1);
865 if (gfp_flags & __GFP_ZERO)
866 prep_zero_page(page, order, gfp_flags);
868 if (order && (gfp_flags & __GFP_COMP))
869 prep_compound_page(page, order);
875 * Go through the free lists for the given migratetype and remove
876 * the smallest available page from the freelists
879 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
882 unsigned int current_order;
883 struct free_area * area;
886 /* Find a page of the appropriate size in the preferred list */
887 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
888 area = &(zone->free_area[current_order]);
889 if (list_empty(&area->free_list[migratetype]))
892 page = list_entry(area->free_list[migratetype].next,
894 list_del(&page->lru);
895 rmv_page_order(page);
897 expand(zone, page, order, current_order, area, migratetype);
906 * This array describes the order in which the free lists are fallen back on
907 * when the free lists for the desired migratetype are depleted.
909 static int fallbacks[MIGRATE_TYPES][4] = {
910 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
911 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
913 [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
914 [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
916 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
918 [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
919 [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
923 * Move the free pages in a range to the free lists of the requested type.
924 * Note that start_page and end_page are not aligned on a pageblock
925 * boundary. If alignment is required, use move_freepages_block()
927 int move_freepages(struct zone *zone,
928 struct page *start_page, struct page *end_page,
935 #ifndef CONFIG_HOLES_IN_ZONE
937 * page_zone is not safe to call in this context when
938 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
939 * anyway as we check zone boundaries in move_freepages_block().
940 * Remove at a later date when no bug reports exist related to
941 * grouping pages by mobility
943 BUG_ON(page_zone(start_page) != page_zone(end_page));
946 for (page = start_page; page <= end_page;) {
947 /* Make sure we are not inadvertently changing nodes */
948 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
950 if (!pfn_valid_within(page_to_pfn(page))) {
955 if (!PageBuddy(page)) {
960 order = page_order(page);
961 list_move(&page->lru,
962 &zone->free_area[order].free_list[migratetype]);
963 set_freepage_migratetype(page, migratetype);
965 pages_moved += 1 << order;
971 int move_freepages_block(struct zone *zone, struct page *page,
974 unsigned long start_pfn, end_pfn;
975 struct page *start_page, *end_page;
977 start_pfn = page_to_pfn(page);
978 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
979 start_page = pfn_to_page(start_pfn);
980 end_page = start_page + pageblock_nr_pages - 1;
981 end_pfn = start_pfn + pageblock_nr_pages - 1;
983 /* Do not cross zone boundaries */
984 if (start_pfn < zone->zone_start_pfn)
986 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
989 return move_freepages(zone, start_page, end_page, migratetype);
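/*
 * Worked example (standalone sketch, not part of this file) of the pfn
 * rounding in move_freepages_block() above, assuming
 * pageblock_nr_pages == 512 (2M pageblocks with 4K pages); that value is
 * an assumption for the example, not taken from this file.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pageblock_nr = 512;	/* assumed */
	unsigned long pfn = 1000;
	unsigned long start_pfn = pfn & ~(pageblock_nr - 1);	/* 512  */
	unsigned long end_pfn   = start_pfn + pageblock_nr - 1;	/* 1023 */

	printf("pfn %lu lives in pageblock [%lu, %lu]\n",
	       pfn, start_pfn, end_pfn);
	return 0;
}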
992 static void change_pageblock_range(struct page *pageblock_page,
993 int start_order, int migratetype)
995 int nr_pageblocks = 1 << (start_order - pageblock_order);
997 while (nr_pageblocks--) {
998 set_pageblock_migratetype(pageblock_page, migratetype);
999 pageblock_page += pageblock_nr_pages;
1003 /* Remove an element from the buddy allocator from the fallback list */
1004 static inline struct page *
1005 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
1007 struct free_area * area;
1012 /* Find the largest possible block of pages in the other list */
1013 for (current_order = MAX_ORDER-1; current_order >= order;
1016 migratetype = fallbacks[start_migratetype][i];
1018 /* MIGRATE_RESERVE handled later if necessary */
1019 if (migratetype == MIGRATE_RESERVE)
1022 area = &(zone->free_area[current_order]);
1023 if (list_empty(&area->free_list[migratetype]))
1026 page = list_entry(area->free_list[migratetype].next,
1031 * If breaking a large block of pages, move all free
1032 * pages to the preferred allocation list. If falling
1033 * back for a reclaimable kernel allocation, be more
1034 * aggressive about taking ownership of free pages
1036 * On the other hand, never change migration
1037 * type of MIGRATE_CMA pageblocks nor move CMA
1038 * pages to different free lists. We don't
1039 * want unmovable pages to be allocated from
1040 * MIGRATE_CMA areas.
1042 if (!is_migrate_cma(migratetype) &&
1043 (unlikely(current_order >= pageblock_order / 2) ||
1044 start_migratetype == MIGRATE_RECLAIMABLE ||
1045 page_group_by_mobility_disabled)) {
1047 pages = move_freepages_block(zone, page,
1050 /* Claim the whole block if over half of it is free */
1051 if (pages >= (1 << (pageblock_order-1)) ||
1052 page_group_by_mobility_disabled)
1053 set_pageblock_migratetype(page,
1056 migratetype = start_migratetype;
1059 /* Remove the page from the freelists */
1060 list_del(&page->lru);
1061 rmv_page_order(page);
1063 /* Take ownership for orders >= pageblock_order */
1064 if (current_order >= pageblock_order &&
1065 !is_migrate_cma(migratetype))
1066 change_pageblock_range(page, current_order,
1069 expand(zone, page, order, current_order, area,
1070 is_migrate_cma(migratetype)
1071 ? migratetype : start_migratetype);
1073 trace_mm_page_alloc_extfrag(page, order, current_order,
1074 start_migratetype, migratetype);
1084 * Do the hard work of removing an element from the buddy allocator.
1085 * Call me with the zone->lock already held.
1087 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1093 page = __rmqueue_smallest(zone, order, migratetype);
1095 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1096 page = __rmqueue_fallback(zone, order, migratetype);
1099 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1100 * is used because __rmqueue_smallest is an inline function
1101 * and we want just one call site
1104 migratetype = MIGRATE_RESERVE;
1109 trace_mm_page_alloc_zone_locked(page, order, migratetype);
1114 * Obtain a specified number of elements from the buddy allocator, all under
1115 * a single hold of the lock, for efficiency. Add them to the supplied list.
1116 * Returns the number of new pages which were placed at *list.
1118 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1119 unsigned long count, struct list_head *list,
1120 int migratetype, int cold)
1122 int mt = migratetype, i;
1124 spin_lock(&zone->lock);
1125 for (i = 0; i < count; ++i) {
1126 struct page *page = __rmqueue(zone, order, migratetype);
1127 if (unlikely(page == NULL))
1131 * Split buddy pages returned by expand() are received here
1132 * in physical page order. The page is added to the caller's list
1133 * and the list head then moves forward. From the caller's
1134 * perspective, the linked list is ordered by page number in
1135 * some conditions. This is useful for IO devices that can
1136 * merge IO requests if the physical pages are ordered properly.
1139 if (likely(cold == 0))
1140 list_add(&page->lru, list);
1142 list_add_tail(&page->lru, list);
1143 if (IS_ENABLED(CONFIG_CMA)) {
1144 mt = get_pageblock_migratetype(page);
1145 if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
1148 set_freepage_migratetype(page, mt);
1150 if (is_migrate_cma(mt))
1151 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1154 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1155 spin_unlock(&zone->lock);
1161 * Called from the vmstat counter updater to drain pagesets of this
1162 * currently executing processor on remote nodes after they have expired.
1165 * Note that this function must be called with the thread pinned to
1166 * a single processor.
1168 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1170 unsigned long flags;
1173 local_irq_save(flags);
1174 if (pcp->count >= pcp->batch)
1175 to_drain = pcp->batch;
1177 to_drain = pcp->count;
1179 free_pcppages_bulk(zone, to_drain, pcp);
1180 pcp->count -= to_drain;
1182 local_irq_restore(flags);
1187 * Drain pages of the indicated processor.
1189 * The processor must either be the current processor and the
1190 * thread pinned to the current processor or a processor that is dead.
1193 static void drain_pages(unsigned int cpu)
1195 unsigned long flags;
1198 for_each_populated_zone(zone) {
1199 struct per_cpu_pageset *pset;
1200 struct per_cpu_pages *pcp;
1202 local_irq_save(flags);
1203 pset = per_cpu_ptr(zone->pageset, cpu);
1207 free_pcppages_bulk(zone, pcp->count, pcp);
1210 local_irq_restore(flags);
1215 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1217 void drain_local_pages(void *arg)
1219 drain_pages(smp_processor_id());
1223 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1225 * Note that this code is protected against sending an IPI to an offline
1226 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1227 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1228 * nothing keeps CPUs from showing up after we populated the cpumask and
1229 * before the call to on_each_cpu_mask().
1231 void drain_all_pages(void)
1234 struct per_cpu_pageset *pcp;
1238 * Allocate in the BSS so we won't require allocation in
1239 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1241 static cpumask_t cpus_with_pcps;
1244 * We don't care about racing with a CPU hotplug event,
1245 * as the offline notification will cause the notified
1246 * cpu to drain that CPU's pcps, and on_each_cpu_mask()
1247 * disables preemption as part of its processing.
1249 for_each_online_cpu(cpu) {
1250 bool has_pcps = false;
1251 for_each_populated_zone(zone) {
1252 pcp = per_cpu_ptr(zone->pageset, cpu);
1253 if (pcp->pcp.count) {
1259 cpumask_set_cpu(cpu, &cpus_with_pcps);
1261 cpumask_clear_cpu(cpu, &cpus_with_pcps);
1263 on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
1266 #ifdef CONFIG_HIBERNATION
1268 void mark_free_pages(struct zone *zone)
1270 unsigned long pfn, max_zone_pfn;
1271 unsigned long flags;
1273 struct list_head *curr;
1275 if (!zone->spanned_pages)
1278 spin_lock_irqsave(&zone->lock, flags);
1280 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1281 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1282 if (pfn_valid(pfn)) {
1283 struct page *page = pfn_to_page(pfn);
1285 if (!swsusp_page_is_forbidden(page))
1286 swsusp_unset_page_free(page);
1289 for_each_migratetype_order(order, t) {
1290 list_for_each(curr, &zone->free_area[order].free_list[t]) {
1293 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1294 for (i = 0; i < (1UL << order); i++)
1295 swsusp_set_page_free(pfn_to_page(pfn + i));
1298 spin_unlock_irqrestore(&zone->lock, flags);
1300 #endif /* CONFIG_PM */
1303 * Free a 0-order page
1304 * cold == 1 ? free a cold page : free a hot page
1306 void free_hot_cold_page(struct page *page, int cold)
1308 struct zone *zone = page_zone(page);
1309 struct per_cpu_pages *pcp;
1310 unsigned long flags;
1313 if (!free_pages_prepare(page, 0))
1316 migratetype = get_pageblock_migratetype(page);
1317 set_freepage_migratetype(page, migratetype);
1318 local_irq_save(flags);
1319 __count_vm_event(PGFREE);
1322 * We only track unmovable, reclaimable and movable on pcp lists.
1323 * Free ISOLATE pages back to the allocator because they are being
1324 * offlined but treat RESERVE as movable pages so we can get those
1325 * areas back if necessary. Otherwise, we may have to free
1326 * excessively into the page allocator
1328 if (migratetype >= MIGRATE_PCPTYPES) {
1329 if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1330 free_one_page(zone, page, 0, migratetype);
1333 migratetype = MIGRATE_MOVABLE;
1336 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1338 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1340 list_add(&page->lru, &pcp->lists[migratetype]);
1342 if (pcp->count >= pcp->high) {
1343 free_pcppages_bulk(zone, pcp->batch, pcp);
1344 pcp->count -= pcp->batch;
1348 local_irq_restore(flags);
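/*
 * Illustrative, standalone sketch (not part of this file) of the
 * pcp->high / pcp->batch behaviour in free_hot_cold_page() above: once the
 * per-cpu list reaches 'high', a 'batch' worth of pages is handed back to
 * the buddy lists.  The numbers are invented; compile separately.
 */
#include <stdio.h>

int main(void)
{
	int count = 0, high = 6, batch = 2, freed_to_buddy = 0, i;

	for (i = 0; i < 10; i++) {	/* free ten order-0 pages */
		count++;		/* page parked on the pcp list */
		if (count >= high) {
			count -= batch;	/* bulk-free one batch */
			freed_to_buddy += batch;
		}
	}
	printf("pcp list holds %d pages, %d went back to the buddy lists\n",
	       count, freed_to_buddy);
	return 0;
}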
1352 * Free a list of 0-order pages
1354 void free_hot_cold_page_list(struct list_head *list, int cold)
1356 struct page *page, *next;
1358 list_for_each_entry_safe(page, next, list, lru) {
1359 trace_mm_page_free_batched(page, cold);
1360 free_hot_cold_page(page, cold);
1365 * split_page takes a non-compound higher-order page, and splits it into
1366 * n (1<<order) sub-pages: page[0] .. page[n-1]
1367 * Each sub-page must be freed individually.
1369 * Note: this is probably too low level an operation for use in drivers.
1370 * Please consult with lkml before using this in your driver.
1372 void split_page(struct page *page, unsigned int order)
1376 VM_BUG_ON(PageCompound(page));
1377 VM_BUG_ON(!page_count(page));
1379 #ifdef CONFIG_KMEMCHECK
1381 * Split shadow pages too, because free(page[0]) would
1382 * otherwise free the whole shadow.
1384 if (kmemcheck_page_is_tracked(page))
1385 split_page(virt_to_page(page[0].shadow), order);
1388 for (i = 1; i < (1 << order); i++)
1389 set_page_refcounted(page + i);
1393 * Similar to the split_page family of functions except that the page
1394 * is required at the given order and is being isolated now to prevent races
1395 * with parallel allocators
1397 int capture_free_page(struct page *page, int alloc_order, int migratetype)
1400 unsigned long watermark;
1404 BUG_ON(!PageBuddy(page));
1406 zone = page_zone(page);
1407 order = page_order(page);
1408 mt = get_pageblock_migratetype(page);
1410 if (mt != MIGRATE_ISOLATE) {
1411 /* Obey watermarks as if the page was being allocated */
1412 watermark = low_wmark_pages(zone) + (1 << order);
1413 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1416 __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
1419 /* Remove page from free list */
1420 list_del(&page->lru);
1421 zone->free_area[order].nr_free--;
1422 rmv_page_order(page);
1424 if (alloc_order != order)
1425 expand(zone, page, alloc_order, order,
1426 &zone->free_area[order], migratetype);
1428 /* Set the pageblock if the captured page is at least a pageblock */
1429 if (order >= pageblock_order - 1) {
1430 struct page *endpage = page + (1 << order) - 1;
1431 for (; page < endpage; page += pageblock_nr_pages) {
1432 int mt = get_pageblock_migratetype(page);
1433 if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
1434 set_pageblock_migratetype(page,
1439 return 1UL << alloc_order;
1443 * Similar to split_page except the page is already free. As this is only
1444 * being used for migration, the migratetype of the block also changes.
1445 * As this is called with interrupts disabled, the caller is responsible
1446 * for calling arch_alloc_page() and kernel_map_pages() after interrupts are enabled.
1449 * Note: this is probably too low level an operation for use in drivers.
1450 * Please consult with lkml before using this in your driver.
1452 int split_free_page(struct page *page)
1457 BUG_ON(!PageBuddy(page));
1458 order = page_order(page);
1460 nr_pages = capture_free_page(page, order, 0);
1464 /* Split into individual pages */
1465 set_page_refcounted(page);
1466 split_page(page, order);
1471 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1472 * we cheat by calling it from here, in the order > 0 path. Saves a branch or two.
1476 struct page *buffered_rmqueue(struct zone *preferred_zone,
1477 struct zone *zone, int order, gfp_t gfp_flags,
1480 unsigned long flags;
1482 int cold = !!(gfp_flags & __GFP_COLD);
1485 if (likely(order == 0)) {
1486 struct per_cpu_pages *pcp;
1487 struct list_head *list;
1489 local_irq_save(flags);
1490 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1491 list = &pcp->lists[migratetype];
1492 if (list_empty(list)) {
1493 pcp->count += rmqueue_bulk(zone, 0,
1496 if (unlikely(list_empty(list)))
1501 page = list_entry(list->prev, struct page, lru);
1503 page = list_entry(list->next, struct page, lru);
1505 list_del(&page->lru);
1508 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1510 * __GFP_NOFAIL is not to be used in new code.
1512 * All __GFP_NOFAIL callers should be fixed so that they
1513 * properly detect and handle allocation failures.
1515 * We most definitely don't want callers attempting to
1516 * allocate greater than order-1 page units with
1519 WARN_ON_ONCE(order > 1);
1521 spin_lock_irqsave(&zone->lock, flags);
1522 page = __rmqueue(zone, order, migratetype);
1523 spin_unlock(&zone->lock);
1526 __mod_zone_freepage_state(zone, -(1 << order),
1527 get_pageblock_migratetype(page));
1530 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1531 zone_statistics(preferred_zone, zone, gfp_flags);
1532 local_irq_restore(flags);
1534 VM_BUG_ON(bad_range(zone, page));
1535 if (prep_new_page(page, order, gfp_flags))
1540 local_irq_restore(flags);
1544 #ifdef CONFIG_FAIL_PAGE_ALLOC
1547 struct fault_attr attr;
1549 u32 ignore_gfp_highmem;
1550 u32 ignore_gfp_wait;
1552 } fail_page_alloc = {
1553 .attr = FAULT_ATTR_INITIALIZER,
1554 .ignore_gfp_wait = 1,
1555 .ignore_gfp_highmem = 1,
1559 static int __init setup_fail_page_alloc(char *str)
1561 return setup_fault_attr(&fail_page_alloc.attr, str);
1563 __setup("fail_page_alloc=", setup_fail_page_alloc);
1565 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1567 if (order < fail_page_alloc.min_order)
1569 if (gfp_mask & __GFP_NOFAIL)
1571 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1573 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1576 return should_fail(&fail_page_alloc.attr, 1 << order);
1579 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1581 static int __init fail_page_alloc_debugfs(void)
1583 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1586 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1587 &fail_page_alloc.attr);
1589 return PTR_ERR(dir);
1591 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1592 &fail_page_alloc.ignore_gfp_wait))
1594 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1595 &fail_page_alloc.ignore_gfp_highmem))
1597 if (!debugfs_create_u32("min-order", mode, dir,
1598 &fail_page_alloc.min_order))
1603 debugfs_remove_recursive(dir);
1608 late_initcall(fail_page_alloc_debugfs);
1610 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1612 #else /* CONFIG_FAIL_PAGE_ALLOC */
1614 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1619 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1622 * Return true if free pages are above 'mark'. This takes into account the order
1623 * of the allocation.
1625 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1626 int classzone_idx, int alloc_flags, long free_pages)
1628 /* free_pages may go negative - that's OK */
1630 long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1633 free_pages -= (1 << order) - 1;
1634 if (alloc_flags & ALLOC_HIGH)
1636 if (alloc_flags & ALLOC_HARDER)
1639 /* If allocation can't use CMA areas don't use free CMA pages */
1640 if (!(alloc_flags & ALLOC_CMA))
1641 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1643 if (free_pages <= min + lowmem_reserve)
1645 for (o = 0; o < order; o++) {
1646 /* At the next order, this order's pages become unavailable */
1647 free_pages -= z->free_area[o].nr_free << o;
1649 /* Require fewer higher order pages to be free */
1652 if (free_pages <= min)
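/*
 * Worked example (standalone sketch, not part of this file) of the
 * high-order part of the check above: for an order-2 request, the order-0
 * and order-1 free pages are discounted and the required margin is halved
 * per order.  The free-list counts and watermark are invented.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	long nr_free[3] = { 300, 40, 10 };	/* free blocks at order 0, 1, 2 */
	long free_pages = 300 + 40 * 2 + 10 * 4;	/* 420 pages in total */
	long min = 128;					/* the watermark      */
	unsigned int order = 2, o;
	bool ok = free_pages > min;

	for (o = 0; o < order && ok; o++) {
		free_pages -= nr_free[o] << o;	/* this order's pages can't help */
		min >>= 1;			/* require fewer higher-order pages */
		ok = free_pages > min;
	}
	printf("order-%u allocation %s the watermark\n",
	       order, ok ? "passes" : "fails");
	return 0;
}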
1658 #ifdef CONFIG_MEMORY_ISOLATION
1659 static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
1661 if (unlikely(zone->nr_pageblock_isolate))
1662 return zone->nr_pageblock_isolate * pageblock_nr_pages;
1666 static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
1672 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1673 int classzone_idx, int alloc_flags)
1675 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1676 zone_page_state(z, NR_FREE_PAGES));
1679 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1680 int classzone_idx, int alloc_flags)
1682 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1684 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1685 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1688 * If the zone has MIGRATE_ISOLATE type free pages, we should consider
1689 * it. nr_zone_isolate_freepages is never accurate so kswapd might not
1690 * sleep although it could do so. But this is more desirable for memory
1691 * hotplug than sleeping which can cause a livelock in the direct
1694 free_pages -= nr_zone_isolate_freepages(z);
1695 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1701 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1702 * skip over zones that are not allowed by the cpuset, or that have
1703 * been recently (in last second) found to be nearly full. See further
1704 * comments in mmzone.h. Reduces cache footprint of zonelist scans
1705 * that have to skip over a lot of full or unallowed zones.
1707 * If the zonelist cache is present in the passed in zonelist, then
1708 * returns a pointer to the allowed node mask (either the current
1709 * tasks mems_allowed, or node_states[N_MEMORY].)
1711 * If the zonelist cache is not available for this zonelist, does
1712 * nothing and returns NULL.
1714 * If the fullzones BITMAP in the zonelist cache is stale (more than
1715 * a second since last zap'd) then we zap it out (clear its bits.)
1717 * We hold off even calling zlc_setup, until after we've checked the
1718 * first zone in the zonelist, on the theory that most allocations will
1719 * be satisfied from that first zone, so best to examine that zone as
1720 * quickly as we can.
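/*
 * Illustrative, standalone sketch (not part of this file) of the "full for
 * up to one second" behaviour described above, with a plain bitmask
 * standing in for zlc->fullzones and a fake clock standing in for jiffies.
 * Compile separately as userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned long fullzones = 0;	/* bit i set => zone i looked full */
	unsigned long last_zap = 0;	/* fake "jiffies" of the last zap  */
	unsigned long hz = 100, now;

	fullzones |= 1UL << 2;		/* zone 2 just failed its watermark */

	for (now = 0; now <= 150; now += 50) {
		if (now > last_zap + hz) {	/* stale after one "second" */
			fullzones = 0;
			last_zap = now;
		}
		printf("t=%3lu: zone 2 is %s\n", now,
		       (fullzones & (1UL << 2)) ? "skipped" : "scanned again");
	}
	return 0;
}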
1722 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1724 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1725 nodemask_t *allowednodes; /* zonelist_cache approximation */
1727 zlc = zonelist->zlcache_ptr;
1731 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1732 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1733 zlc->last_full_zap = jiffies;
1736 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1737 &cpuset_current_mems_allowed :
1738 &node_states[N_MEMORY];
1739 return allowednodes;
1743 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1744 * if it is worth looking at further for free memory:
1745 * 1) Check that the zone isn't thought to be full (doesn't have its
1746 * bit set in the zonelist_cache fullzones BITMAP).
1747 * 2) Check that the zones node (obtained from the zonelist_cache
1748 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1749 * Return true (non-zero) if zone is worth looking at further, or
1750 * else return false (zero) if it is not.
1752 * This check -ignores- the distinction between various watermarks,
1753 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1754 * found to be full for any variation of these watermarks, it will
1755 * be considered full for up to one second by all requests, unless
1756 * we are so low on memory on all allowed nodes that we are forced
1757 * into the second scan of the zonelist.
1759 * In the second scan we ignore this zonelist cache and exactly
1760 * apply the watermarks to all zones, even if it is slower to do so.
1761 * We are low on memory in the second scan, and should leave no stone
1762 * unturned looking for a free page.
1764 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1765 nodemask_t *allowednodes)
1767 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1768 int i; /* index of *z in zonelist zones */
1769 int n; /* node that zone *z is on */
1771 zlc = zonelist->zlcache_ptr;
1775 i = z - zonelist->_zonerefs;
1778 /* This zone is worth trying if it is allowed but not full */
1779 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1783 * Given 'z' scanning a zonelist, set the corresponding bit in
1784 * zlc->fullzones, so that subsequent attempts to allocate a page
1785 * from that zone don't waste time re-examining it.
1787 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1789 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1790 int i; /* index of *z in zonelist zones */
1792 zlc = zonelist->zlcache_ptr;
1796 i = z - zonelist->_zonerefs;
1798 set_bit(i, zlc->fullzones);
1802 * clear all zones full, called after direct reclaim makes progress so that
1803 * a zone that was recently full is not skipped over for up to a second
1805 static void zlc_clear_zones_full(struct zonelist *zonelist)
1807 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1809 zlc = zonelist->zlcache_ptr;
1813 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1816 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1818 return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
1821 static void __paginginit init_zone_allows_reclaim(int nid)
1825 for_each_online_node(i)
1826 if (node_distance(nid, i) <= RECLAIM_DISTANCE)
1827 node_set(i, NODE_DATA(nid)->reclaim_nodes);
1829 zone_reclaim_mode = 1;
1832 #else /* CONFIG_NUMA */
1834 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1839 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1840 nodemask_t *allowednodes)
1845 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1849 static void zlc_clear_zones_full(struct zonelist *zonelist)
1853 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1858 static inline void init_zone_allows_reclaim(int nid)
1861 #endif /* CONFIG_NUMA */
1867 * get_page_from_freelist goes through the zonelist trying to allocate a page.
1867 static struct page *
1868 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1869 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1870 struct zone *preferred_zone, int migratetype)
1873 struct page *page = NULL;
1876 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1877 int zlc_active = 0; /* set if using zonelist_cache */
1878 int did_zlc_setup = 0; /* just call zlc_setup() one time */
1880 classzone_idx = zone_idx(preferred_zone);
1883 * Scan zonelist, looking for a zone with enough free.
1884 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1886 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1887 high_zoneidx, nodemask) {
1888 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1889 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1891 if ((alloc_flags & ALLOC_CPUSET) &&
1892 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1895 * When allocating a page cache page for writing, we
1896 * want to get it from a zone that is within its dirty
1897 * limit, such that no single zone holds more than its
1898 * proportional share of globally allowed dirty pages.
1899 * The dirty limits take into account the zone's
1900 * lowmem reserves and high watermark so that kswapd
1901 * should be able to balance it without having to
1902 * write pages from its LRU list.
1904 * This may look like it could increase pressure on
1905 * lower zones by failing allocations in higher zones
1906 * before they are full. But the pages that do spill
1907 * over are limited as the lower zones are protected
1908 * by this very same mechanism. It should not become
1909 * a practical burden to them.
1911 * XXX: For now, allow allocations to potentially
1912 * exceed the per-zone dirty limit in the slowpath
1913 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1914 * which is important when on a NUMA setup the allowed
1915 * zones are together not big enough to reach the
1916 * global limit. The proper fix for these situations
1917 * will require awareness of zones in the
1918 * dirty-throttling and the flusher threads.
1920 if ((alloc_flags & ALLOC_WMARK_LOW) &&
1921 (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1922 goto this_zone_full;
1924 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1925 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1929 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1930 if (zone_watermark_ok(zone, order, mark,
1931 classzone_idx, alloc_flags))
1934 if (IS_ENABLED(CONFIG_NUMA) &&
1935 !did_zlc_setup && nr_online_nodes > 1) {
1937 * we do zlc_setup if there are multiple nodes
1938 * and before considering the first zone allowed by the cpuset.
1941 allowednodes = zlc_setup(zonelist, alloc_flags);
1946 if (zone_reclaim_mode == 0 ||
1947 !zone_allows_reclaim(preferred_zone, zone))
1948 goto this_zone_full;
1951 * As we may have just activated ZLC, check if the first
1952 * eligible zone has failed zone_reclaim recently.
1954 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1955 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1958 ret = zone_reclaim(zone, gfp_mask, order);
1960 case ZONE_RECLAIM_NOSCAN:
1963 case ZONE_RECLAIM_FULL:
1964 /* scanned but unreclaimable */
1967 /* did we reclaim enough */
1968 if (!zone_watermark_ok(zone, order, mark,
1969 classzone_idx, alloc_flags))
1970 goto this_zone_full;
1975 page = buffered_rmqueue(preferred_zone, zone, order,
1976 gfp_mask, migratetype);
1980 if (IS_ENABLED(CONFIG_NUMA))
1981 zlc_mark_zone_full(zonelist, z);
1984 if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
1985 /* Disable zlc cache for second zonelist scan */
1992 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
1993 * necessary to allocate the page. The expectation is
1994 * that the caller is taking steps that will free more
1995 * memory. The caller should avoid the page being used
1996 * for !PFMEMALLOC purposes.
1998 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
2004 * Large machines with many possible nodes should not always dump per-node
2005 * meminfo in irq context.
2007 static inline bool should_suppress_show_mem(void)
2012 ret = in_interrupt();
2017 static DEFINE_RATELIMIT_STATE(nopage_rs,
2018 DEFAULT_RATELIMIT_INTERVAL,
2019 DEFAULT_RATELIMIT_BURST);
2021 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2023 unsigned int filter = SHOW_MEM_FILTER_NODES;
2025 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2026 debug_guardpage_minorder() > 0)
2030 * This documents exceptions given to allocations in certain
2031 * contexts that are allowed to allocate outside current's set of allowed nodes.
2034 if (!(gfp_mask & __GFP_NOMEMALLOC))
2035 if (test_thread_flag(TIF_MEMDIE) ||
2036 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2037 filter &= ~SHOW_MEM_FILTER_NODES;
2038 if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2039 filter &= ~SHOW_MEM_FILTER_NODES;
2042 struct va_format vaf;
2045 va_start(args, fmt);
2050 pr_warn("%pV", &vaf);
2055 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2056 current->comm, order, gfp_mask);
2059 if (!should_suppress_show_mem())
2064 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2065 unsigned long did_some_progress,
2066 unsigned long pages_reclaimed)
2068 /* Do not loop if specifically requested */
2069 if (gfp_mask & __GFP_NORETRY)
2072 /* Always retry if specifically requested */
2073 if (gfp_mask & __GFP_NOFAIL)
2077 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
2078 * making forward progress without invoking OOM. Suspend also disables
2079 * storage devices so kswapd will not help. Bail if we are suspending.
2081 if (!did_some_progress && pm_suspended_storage())
2085 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
2086 * means __GFP_NOFAIL, but that may not be true in other implementations.
2089 if (order <= PAGE_ALLOC_COSTLY_ORDER)
2093 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
2094 * specified, then we retry until we no longer reclaim any pages
2095 * (above), or we've reclaimed an order of pages at least as
2096 * large as the allocation's order. In both cases, if the
2097 * allocation still fails, we stop retrying.
2099 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
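/*
 * Illustrative, standalone sketch (not part of this file) of the retry
 * policy spelled out in the comments above, reduced to a pure function.
 * The costly-order threshold of 3 stands in for PAGE_ALLOC_COSTLY_ORDER;
 * the pm_suspended_storage() case is left out for brevity.
 */
#include <stdbool.h>
#include <stdio.h>

static bool toy_should_retry(bool noretry, bool nofail, bool repeat,
			     unsigned int order, unsigned long reclaimed)
{
	if (noretry)
		return false;		/* __GFP_NORETRY: never loop */
	if (nofail)
		return true;		/* __GFP_NOFAIL: always loop */
	if (order <= 3)
		return true;		/* small orders are treated as no-fail */
	if (repeat && reclaimed < (1UL << order))
		return true;		/* __GFP_REPEAT: loop until enough reclaimed */
	return false;
}

int main(void)
{
	printf("order-9 __GFP_REPEAT after 100 reclaimed pages: %s\n",
	       toy_should_retry(false, false, true, 9, 100) ? "retry" : "give up");
	return 0;
}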
2105 static inline struct page *
2106 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2107 struct zonelist *zonelist, enum zone_type high_zoneidx,
2108 nodemask_t *nodemask, struct zone *preferred_zone,
2113 /* Acquire the OOM killer lock for the zones in zonelist */
2114 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
2115 schedule_timeout_uninterruptible(1);
2120 * Go through the zonelist yet one more time, keeping a very high watermark
2121 * here; this is only to catch a parallel oom killing. We must fail if
2122 * we're still under heavy pressure.
2124 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2125 order, zonelist, high_zoneidx,
2126 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2127 preferred_zone, migratetype);
2131 if (!(gfp_mask & __GFP_NOFAIL)) {
2132 /* The OOM killer will not help higher order allocs */
2133 if (order > PAGE_ALLOC_COSTLY_ORDER)
2135 /* The OOM killer does not needlessly kill tasks for lowmem */
2136 if (high_zoneidx < ZONE_NORMAL)
2139 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2140 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2141 * The caller should handle page allocation failure by itself if
2142 * it specifies __GFP_THISNODE.
2143 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2145 if (gfp_mask & __GFP_THISNODE)
2148 /* Exhausted what can be done so it's blamo time */
2149 out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2152 clear_zonelist_oom(zonelist, gfp_mask);
2156 #ifdef CONFIG_COMPACTION
2157 /* Try memory compaction for high-order allocations before reclaim */
2158 static struct page *
2159 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2160 struct zonelist *zonelist, enum zone_type high_zoneidx,
2161 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2162 int migratetype, bool sync_migration,
2163 bool *contended_compaction, bool *deferred_compaction,
2164 unsigned long *did_some_progress)
2166 struct page *page = NULL;
2171 if (compaction_deferred(preferred_zone, order)) {
2172 *deferred_compaction = true;
2176 current->flags |= PF_MEMALLOC;
2177 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2178 nodemask, sync_migration,
2179 contended_compaction, &page);
2180 current->flags &= ~PF_MEMALLOC;
2182 /* If compaction captured a page, prep and use it */
2184 prep_new_page(page, order, gfp_mask);
2188 if (*did_some_progress != COMPACT_SKIPPED) {
2189 /* Page migration frees to the PCP lists but we want merging */
2190 drain_pages(get_cpu());
2193 page = get_page_from_freelist(gfp_mask, nodemask,
2194 order, zonelist, high_zoneidx,
2195 alloc_flags & ~ALLOC_NO_WATERMARKS,
2196 preferred_zone, migratetype);
2199 preferred_zone->compact_blockskip_flush = false;
2200 preferred_zone->compact_considered = 0;
2201 preferred_zone->compact_defer_shift = 0;
2202 if (order >= preferred_zone->compact_order_failed)
2203 preferred_zone->compact_order_failed = order + 1;
2204 count_vm_event(COMPACTSUCCESS);
2209 	 * It's bad if a compaction run occurs and fails.
2210 * The most likely reason is that pages exist,
2211 * but not enough to satisfy watermarks.
2213 count_vm_event(COMPACTFAIL);
2216 * As async compaction considers a subset of pageblocks, only
2217 * defer if the failure was a sync compaction failure.
2220 defer_compaction(preferred_zone, order);
2228 static inline struct page *
2229 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2230 struct zonelist *zonelist, enum zone_type high_zoneidx,
2231 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2232 int migratetype, bool sync_migration,
2233 bool *contended_compaction, bool *deferred_compaction,
2234 unsigned long *did_some_progress)
2238 #endif /* CONFIG_COMPACTION */
2240 /* Perform direct synchronous page reclaim */
2242 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2243 nodemask_t *nodemask)
2245 struct reclaim_state reclaim_state;
2250 /* We now go into synchronous reclaim */
2251 cpuset_memory_pressure_bump();
2252 current->flags |= PF_MEMALLOC;
2253 lockdep_set_current_reclaim_state(gfp_mask);
2254 reclaim_state.reclaimed_slab = 0;
2255 current->reclaim_state = &reclaim_state;
2257 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2259 current->reclaim_state = NULL;
2260 lockdep_clear_current_reclaim_state();
2261 current->flags &= ~PF_MEMALLOC;
2268 /* The really slow allocator path where we enter direct reclaim */
2269 static inline struct page *
2270 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2271 struct zonelist *zonelist, enum zone_type high_zoneidx,
2272 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2273 int migratetype, unsigned long *did_some_progress)
2275 struct page *page = NULL;
2276 bool drained = false;
2278 	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, nodemask);
2280 if (unlikely(!(*did_some_progress)))
2283 /* After successful reclaim, reconsider all zones for allocation */
2284 if (IS_ENABLED(CONFIG_NUMA))
2285 zlc_clear_zones_full(zonelist);
2288 page = get_page_from_freelist(gfp_mask, nodemask, order,
2289 zonelist, high_zoneidx,
2290 alloc_flags & ~ALLOC_NO_WATERMARKS,
2291 preferred_zone, migratetype);
2294 * If an allocation failed after direct reclaim, it could be because
2295 * pages are pinned on the per-cpu lists. Drain them and try again
2297 if (!page && !drained) {
2307 * This is called in the allocator slow-path if the allocation request is of
2308 * sufficient urgency to ignore watermarks and take other desperate measures
2310 static inline struct page *
2311 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2312 struct zonelist *zonelist, enum zone_type high_zoneidx,
2313 nodemask_t *nodemask, struct zone *preferred_zone,
2319 page = get_page_from_freelist(gfp_mask, nodemask, order,
2320 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2321 preferred_zone, migratetype);
2323 if (!page && gfp_mask & __GFP_NOFAIL)
2324 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2325 } while (!page && (gfp_mask & __GFP_NOFAIL));
2331 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2332 enum zone_type high_zoneidx,
2333 enum zone_type classzone_idx)
2338 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2339 wakeup_kswapd(zone, order, classzone_idx);
2343 gfp_to_alloc_flags(gfp_t gfp_mask)
2345 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2346 const gfp_t wait = gfp_mask & __GFP_WAIT;
2348 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2349 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2352 * The caller may dip into page reserves a bit more if the caller
2353 * cannot run direct reclaim, or if the caller has realtime scheduling
2354 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
2355 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2357 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2361 * Not worth trying to allocate harder for
2362 * __GFP_NOMEMALLOC even if it can't schedule.
2364 if (!(gfp_mask & __GFP_NOMEMALLOC))
2365 alloc_flags |= ALLOC_HARDER;
2367 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2368 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2370 alloc_flags &= ~ALLOC_CPUSET;
2371 } else if (unlikely(rt_task(current)) && !in_interrupt())
2372 alloc_flags |= ALLOC_HARDER;
2374 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2375 if (gfp_mask & __GFP_MEMALLOC)
2376 alloc_flags |= ALLOC_NO_WATERMARKS;
2377 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2378 alloc_flags |= ALLOC_NO_WATERMARKS;
2379 else if (!in_interrupt() &&
2380 ((current->flags & PF_MEMALLOC) ||
2381 unlikely(test_thread_flag(TIF_MEMDIE))))
2382 alloc_flags |= ALLOC_NO_WATERMARKS;
2385 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2386 alloc_flags |= ALLOC_CMA;
2391 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2393 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
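/*
 * Illustrative sketch, not part of this file: the mapping gfp_to_alloc_flags()
 * is expected to produce for two common masks under the rules above, assuming
 * the caller is not a realtime task and __GFP_NOMEMALLOC is clear.  The helper
 * name is hypothetical and exists only to document the expectation.
 */
static void __maybe_unused example_gfp_to_alloc_flags_expectations(void)
{
	/* GFP_ATOMIC: !__GFP_WAIT plus __GFP_HIGH => harder + high reserves */
	WARN_ON(!(gfp_to_alloc_flags(GFP_ATOMIC) & ALLOC_HARDER));
	WARN_ON(!(gfp_to_alloc_flags(GFP_ATOMIC) & ALLOC_HIGH));

	/* GFP_KERNEL: may reclaim, so only the minimum watermark and cpuset */
	WARN_ON(gfp_to_alloc_flags(GFP_KERNEL) & ALLOC_HARDER);
}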
2396 static inline struct page *
2397 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2398 struct zonelist *zonelist, enum zone_type high_zoneidx,
2399 nodemask_t *nodemask, struct zone *preferred_zone,
2402 const gfp_t wait = gfp_mask & __GFP_WAIT;
2403 struct page *page = NULL;
2405 unsigned long pages_reclaimed = 0;
2406 unsigned long did_some_progress;
2407 bool sync_migration = false;
2408 bool deferred_compaction = false;
2409 bool contended_compaction = false;
2412 * In the slowpath, we sanity check order to avoid ever trying to
2413 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2414 	 * be using allocators in order of preference for an area that is too large.
2417 if (order >= MAX_ORDER) {
2418 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2423 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2424 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2425 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2426 * using a larger set of nodes after it has established that the
2427 	 * allowed per node queues are empty and that nodes are over-allocated.
2430 if (IS_ENABLED(CONFIG_NUMA) &&
2431 (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2435 if (!(gfp_mask & __GFP_NO_KSWAPD))
2436 wake_all_kswapd(order, zonelist, high_zoneidx,
2437 zone_idx(preferred_zone));
2440 * OK, we're below the kswapd watermark and have kicked background
2441 * reclaim. Now things get more complex, so set up alloc_flags according
2442 * to how we want to proceed.
2444 alloc_flags = gfp_to_alloc_flags(gfp_mask);
2447 	 * Find the true preferred zone if the allocation is unconstrained by cpusets.
2450 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2451 		first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
2455 /* This is the last chance, in general, before the goto nopage. */
2456 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2457 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2458 preferred_zone, migratetype);
2462 /* Allocate without watermarks if the context allows */
2463 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2465 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2466 		 * the allocation is high priority and these types of
2467 		 * allocations are system rather than user oriented
2469 zonelist = node_zonelist(numa_node_id(), gfp_mask);
2471 page = __alloc_pages_high_priority(gfp_mask, order,
2472 zonelist, high_zoneidx, nodemask,
2473 preferred_zone, migratetype);
2479 /* Atomic allocations - we can't balance anything */
2483 /* Avoid recursion of direct reclaim */
2484 if (current->flags & PF_MEMALLOC)
2487 /* Avoid allocations with no watermarks from looping endlessly */
2488 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2492 * Try direct compaction. The first pass is asynchronous. Subsequent
2493 * attempts after direct reclaim are synchronous
2495 page = __alloc_pages_direct_compact(gfp_mask, order,
2496 zonelist, high_zoneidx,
2498 alloc_flags, preferred_zone,
2499 migratetype, sync_migration,
2500 &contended_compaction,
2501 &deferred_compaction,
2502 &did_some_progress);
2505 sync_migration = true;
2508 * If compaction is deferred for high-order allocations, it is because
2509 	 * sync compaction recently failed. If this is the case and the caller
2510 * requested a movable allocation that does not heavily disrupt the
2511 * system then fail the allocation instead of entering direct reclaim.
2513 if ((deferred_compaction || contended_compaction) &&
2514 (gfp_mask & __GFP_NO_KSWAPD))
2517 /* Try direct reclaim and then allocating */
2518 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2519 zonelist, high_zoneidx,
2521 alloc_flags, preferred_zone,
2522 migratetype, &did_some_progress);
2527 * If we failed to make any progress reclaiming, then we are
2528 * running out of options and have to consider going OOM
2530 if (!did_some_progress) {
2531 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2532 if (oom_killer_disabled)
2534 /* Coredumps can quickly deplete all memory reserves */
2535 if ((current->flags & PF_DUMPCORE) &&
2536 !(gfp_mask & __GFP_NOFAIL))
2538 page = __alloc_pages_may_oom(gfp_mask, order,
2539 zonelist, high_zoneidx,
2540 nodemask, preferred_zone,
2545 if (!(gfp_mask & __GFP_NOFAIL)) {
2547 * The oom killer is not called for high-order
2548 * allocations that may fail, so if no progress
2549 * is being made, there are no other options and
2550 * retrying is unlikely to help.
2552 if (order > PAGE_ALLOC_COSTLY_ORDER)
2555 * The oom killer is not called for lowmem
2556 * allocations to prevent needlessly killing
2559 if (high_zoneidx < ZONE_NORMAL)
2567 /* Check if we should retry the allocation */
2568 pages_reclaimed += did_some_progress;
2569 if (should_alloc_retry(gfp_mask, order, did_some_progress,
2571 /* Wait for some write requests to complete then retry */
2572 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2576 * High-order allocations do not necessarily loop after
2577 * direct reclaim and reclaim/compaction depends on compaction
2578 * being called after reclaim so call directly if necessary
2580 page = __alloc_pages_direct_compact(gfp_mask, order,
2581 zonelist, high_zoneidx,
2583 alloc_flags, preferred_zone,
2584 migratetype, sync_migration,
2585 &contended_compaction,
2586 &deferred_compaction,
2587 &did_some_progress);
2593 warn_alloc_failed(gfp_mask, order, NULL);
2596 if (kmemcheck_enabled)
2597 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2603 * This is the 'heart' of the zoned buddy allocator.
2606 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2607 struct zonelist *zonelist, nodemask_t *nodemask)
2609 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2610 struct zone *preferred_zone;
2611 struct page *page = NULL;
2612 int migratetype = allocflags_to_migratetype(gfp_mask);
2613 unsigned int cpuset_mems_cookie;
2614 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
2615 struct mem_cgroup *memcg = NULL;
2617 gfp_mask &= gfp_allowed_mask;
2619 lockdep_trace_alloc(gfp_mask);
2621 might_sleep_if(gfp_mask & __GFP_WAIT);
2623 if (should_fail_alloc_page(gfp_mask, order))
2627 * Check the zones suitable for the gfp_mask contain at least one
2628 * valid zone. It's possible to have an empty zonelist as a result
2629 * of GFP_THISNODE and a memoryless node
2631 if (unlikely(!zonelist->_zonerefs->zone))
2635 * Will only have any effect when __GFP_KMEMCG is set. This is
2636 * verified in the (always inline) callee
2638 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2642 cpuset_mems_cookie = get_mems_allowed();
2644 /* The preferred zone is used for statistics later */
2645 first_zones_zonelist(zonelist, high_zoneidx,
2646 			nodemask ? : &cpuset_current_mems_allowed, &preferred_zone);
2648 if (!preferred_zone)
2652 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2653 alloc_flags |= ALLOC_CMA;
2655 /* First allocation attempt */
2656 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2657 zonelist, high_zoneidx, alloc_flags,
2658 preferred_zone, migratetype);
2659 if (unlikely(!page))
2660 page = __alloc_pages_slowpath(gfp_mask, order,
2661 zonelist, high_zoneidx, nodemask,
2662 preferred_zone, migratetype);
2664 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2668 * When updating a task's mems_allowed, it is possible to race with
2669 * parallel threads in such a way that an allocation can fail while
2670 * the mask is being updated. If a page allocation is about to fail,
2671 * check if the cpuset changed during allocation and if so, retry.
2673 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2676 memcg_kmem_commit_charge(page, memcg, order);
2680 EXPORT_SYMBOL(__alloc_pages_nodemask);
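/*
 * Illustrative sketch, not part of this file: kernel code normally reaches
 * __alloc_pages_nodemask() through the alloc_pages() wrapper in <linux/gfp.h>.
 * The helper below is hypothetical.
 */
static struct page * __maybe_unused example_alloc_four_pages(void)
{
	/* order-2 request: four physically contiguous pages, NULL on failure */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	/* the matching release is __free_pages(page, 2) */
	return page;
}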
2683 * Common helper functions.
2685 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2690 	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page.
2693 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2695 page = alloc_pages(gfp_mask, order);
2698 return (unsigned long) page_address(page);
2700 EXPORT_SYMBOL(__get_free_pages);
2702 unsigned long get_zeroed_page(gfp_t gfp_mask)
2704 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2706 EXPORT_SYMBOL(get_zeroed_page);
2708 void __free_pages(struct page *page, unsigned int order)
2710 if (put_page_testzero(page)) {
		if (order == 0)
2712 			free_hot_cold_page(page, 0);
		else
2714 			__free_pages_ok(page, order);
2718 EXPORT_SYMBOL(__free_pages);
2720 void free_pages(unsigned long addr, unsigned int order)
	if (addr != 0) {
2723 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2724 		__free_pages(virt_to_page((void *)addr), order);
	}
2728 EXPORT_SYMBOL(free_pages);
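/*
 * Illustrative sketch, not part of this file: the helpers above return a
 * kernel virtual address rather than a struct page, which suits callers that
 * just want a buffer.  The function name below is hypothetical.
 */
static int __maybe_unused example_use_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;

	/* ... use the zero-filled page at (void *)addr ... */

	free_page(addr);
	return 0;
}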
2731 * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
2732 * pages allocated with __GFP_KMEMCG.
2734 * Those pages are accounted to a particular memcg, embedded in the
2735 * corresponding page_cgroup. To avoid adding a hit in the allocator to search
2736 * for that information only to find out that it is NULL for users who have no
2737 * interest in that whatsoever, we provide these functions.
2739 * The caller knows better which flags it relies on.
2741 void __free_memcg_kmem_pages(struct page *page, unsigned int order)
2743 memcg_kmem_uncharge_pages(page, order);
2744 __free_pages(page, order);
2747 void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
	if (addr != 0) {
2750 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2751 		__free_memcg_kmem_pages(virt_to_page((void *)addr), order);
	}
2755 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2758 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2759 unsigned long used = addr + PAGE_ALIGN(size);
2761 split_page(virt_to_page((void *)addr), order);
2762 		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
2767 return (void *)addr;
2771  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2772 * @size: the number of bytes to allocate
2773 * @gfp_mask: GFP flags for the allocation
2775 * This function is similar to alloc_pages(), except that it allocates the
2776 * minimum number of pages to satisfy the request. alloc_pages() can only
2777 * allocate memory in power-of-two pages.
2779 * This function is also limited by MAX_ORDER.
2781 * Memory allocated by this function must be released by free_pages_exact().
2783 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2785 unsigned int order = get_order(size);
2788 addr = __get_free_pages(gfp_mask, order);
2789 return make_alloc_exact(addr, order, size);
2791 EXPORT_SYMBOL(alloc_pages_exact);
2794  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous pages on a node.
2796 * @nid: the preferred node ID where memory should be allocated
2797 * @size: the number of bytes to allocate
2798 * @gfp_mask: GFP flags for the allocation
2800  * Like alloc_pages_exact(), but try to allocate on node nid first before falling back.
2802  * Note this is not alloc_pages_exact_node(), which allocates on a specific node but is not exact.
2805 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2807 unsigned order = get_order(size);
2808 struct page *p = alloc_pages_node(nid, gfp_mask, order);
2811 return make_alloc_exact((unsigned long)page_address(p), order, size);
2813 EXPORT_SYMBOL(alloc_pages_exact_nid);
2816 * free_pages_exact - release memory allocated via alloc_pages_exact()
2817 * @virt: the value returned by alloc_pages_exact.
2818 * @size: size of allocation, same value as passed to alloc_pages_exact().
2820 * Release the memory allocated by a previous call to alloc_pages_exact.
2822 void free_pages_exact(void *virt, size_t size)
2824 unsigned long addr = (unsigned long)virt;
2825 unsigned long end = addr + PAGE_ALIGN(size);
2827 	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
2832 EXPORT_SYMBOL(free_pages_exact);
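/*
 * Illustrative sketch, not part of this file: alloc_pages_exact() avoids the
 * rounding waste of a power-of-two request.  For a hypothetical 5-page buffer,
 * alloc_pages() would need an order-3 (8 page) block; the exact variant hands
 * the 3-page tail straight back.
 */
static void * __maybe_unused example_alloc_exact_buffer(void)
{
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);

	/* release with free_pages_exact(buf, 5 * PAGE_SIZE) when done */
	return buf;
}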
2834 static unsigned int nr_free_zone_pages(int offset)
2839 /* Just pick one node, since fallback list is circular */
2840 unsigned int sum = 0;
2842 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2844 for_each_zone_zonelist(zone, z, zonelist, offset) {
2845 unsigned long size = zone->present_pages;
2846 unsigned long high = high_wmark_pages(zone);
2855 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2857 unsigned int nr_free_buffer_pages(void)
2859 return nr_free_zone_pages(gfp_zone(GFP_USER));
2861 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
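/*
 * Illustrative worked example (numbers hypothetical): from the per-zone size
 * and high-watermark values computed above, each populated zone is expected to
 * contribute only the pages above its high watermark.  A zone with 262144
 * present pages and a high watermark of 1024 pages would contribute
 * 262144 - 1024 = 261120 pages (about 1020 MB with 4 KiB pages); a zone at or
 * below its high watermark contributes nothing.
 */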
2864 * Amount of free RAM allocatable within all zones
2866 unsigned int nr_free_pagecache_pages(void)
2868 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2871 static inline void show_node(struct zone *zone)
2873 if (IS_ENABLED(CONFIG_NUMA))
2874 printk("Node %d ", zone_to_nid(zone));
2877 void si_meminfo(struct sysinfo *val)
2879 val->totalram = totalram_pages;
2881 val->freeram = global_page_state(NR_FREE_PAGES);
2882 val->bufferram = nr_blockdev_pages();
2883 val->totalhigh = totalhigh_pages;
2884 val->freehigh = nr_free_highpages();
2885 val->mem_unit = PAGE_SIZE;
2888 EXPORT_SYMBOL(si_meminfo);
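/*
 * Illustrative sketch, not part of this file: a typical consumer of
 * si_meminfo().  Note the counters are in units of val->mem_unit (PAGE_SIZE
 * here), not bytes.  The helper name is hypothetical.
 */
static u64 __maybe_unused example_total_ram_bytes(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	return (u64)si.totalram * si.mem_unit;
}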
2891 void si_meminfo_node(struct sysinfo *val, int nid)
2893 pg_data_t *pgdat = NODE_DATA(nid);
2895 val->totalram = pgdat->node_present_pages;
2896 val->freeram = node_page_state(nid, NR_FREE_PAGES);
2897 #ifdef CONFIG_HIGHMEM
2898 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2899 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2905 val->mem_unit = PAGE_SIZE;
2910 * Determine whether the node should be displayed or not, depending on whether
2911 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2913 bool skip_free_areas_node(unsigned int flags, int nid)
2916 unsigned int cpuset_mems_cookie;
2918 if (!(flags & SHOW_MEM_FILTER_NODES))
2922 cpuset_mems_cookie = get_mems_allowed();
2923 ret = !node_isset(nid, cpuset_current_mems_allowed);
2924 } while (!put_mems_allowed(cpuset_mems_cookie));
2929 #define K(x) ((x) << (PAGE_SHIFT-10))
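/*
 * Illustrative note: K() converts a page count to kilobytes.  With 4 KiB pages
 * (PAGE_SHIFT == 12) it shifts left by 2, so K(100) == 400 kB; the per-zone
 * figures printed below are produced this way.
 */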
2931 static void show_migration_types(unsigned char type)
2933 static const char types[MIGRATE_TYPES] = {
2934 [MIGRATE_UNMOVABLE] = 'U',
2935 [MIGRATE_RECLAIMABLE] = 'E',
2936 [MIGRATE_MOVABLE] = 'M',
2937 [MIGRATE_RESERVE] = 'R',
2939 [MIGRATE_CMA] = 'C',
2941 [MIGRATE_ISOLATE] = 'I',
2943 char tmp[MIGRATE_TYPES + 1];
2947 for (i = 0; i < MIGRATE_TYPES; i++) {
2948 if (type & (1 << i))
2953 printk("(%s) ", tmp);
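/*
 * Illustrative note (hypothetical sample): one letter is emitted per
 * migratetype bit set above, so a free_area holding unmovable, movable and CMA
 * pageblocks would be annotated "(UMC)" in the per-order dump below.
 */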
2957  * Show free area list (used e.g. by the Shift+Scroll Lock show-memory hotkey)
2958 * We also calculate the percentage fragmentation. We do this by counting the
2959 * memory on each free list with the exception of the first item on the list.
2960 * Suppresses nodes that are not allowed by current's cpuset if
2961 * SHOW_MEM_FILTER_NODES is passed.
2963 void show_free_areas(unsigned int filter)
2968 for_each_populated_zone(zone) {
2969 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2972 printk("%s per-cpu:\n", zone->name);
2974 for_each_online_cpu(cpu) {
2975 struct per_cpu_pageset *pageset;
2977 pageset = per_cpu_ptr(zone->pageset, cpu);
2979 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2980 cpu, pageset->pcp.high,
2981 pageset->pcp.batch, pageset->pcp.count);
2985 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2986 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2988 " dirty:%lu writeback:%lu unstable:%lu\n"
2989 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2990 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
2992 global_page_state(NR_ACTIVE_ANON),
2993 global_page_state(NR_INACTIVE_ANON),
2994 global_page_state(NR_ISOLATED_ANON),
2995 global_page_state(NR_ACTIVE_FILE),
2996 global_page_state(NR_INACTIVE_FILE),
2997 global_page_state(NR_ISOLATED_FILE),
2998 global_page_state(NR_UNEVICTABLE),
2999 global_page_state(NR_FILE_DIRTY),
3000 global_page_state(NR_WRITEBACK),
3001 global_page_state(NR_UNSTABLE_NFS),
3002 global_page_state(NR_FREE_PAGES),
3003 global_page_state(NR_SLAB_RECLAIMABLE),
3004 global_page_state(NR_SLAB_UNRECLAIMABLE),
3005 global_page_state(NR_FILE_MAPPED),
3006 global_page_state(NR_SHMEM),
3007 global_page_state(NR_PAGETABLE),
3008 global_page_state(NR_BOUNCE),
3009 global_page_state(NR_FREE_CMA_PAGES));
3011 for_each_populated_zone(zone) {
3014 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3022 " active_anon:%lukB"
3023 " inactive_anon:%lukB"
3024 " active_file:%lukB"
3025 " inactive_file:%lukB"
3026 " unevictable:%lukB"
3027 " isolated(anon):%lukB"
3028 " isolated(file):%lukB"
3036 " slab_reclaimable:%lukB"
3037 " slab_unreclaimable:%lukB"
3038 " kernel_stack:%lukB"
3043 " writeback_tmp:%lukB"
3044 " pages_scanned:%lu"
3045 " all_unreclaimable? %s"
3048 K(zone_page_state(zone, NR_FREE_PAGES)),
3049 K(min_wmark_pages(zone)),
3050 K(low_wmark_pages(zone)),
3051 K(high_wmark_pages(zone)),
3052 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3053 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3054 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3055 K(zone_page_state(zone, NR_INACTIVE_FILE)),
3056 K(zone_page_state(zone, NR_UNEVICTABLE)),
3057 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3058 K(zone_page_state(zone, NR_ISOLATED_FILE)),
3059 K(zone->present_pages),
3060 K(zone->managed_pages),
3061 K(zone_page_state(zone, NR_MLOCK)),
3062 K(zone_page_state(zone, NR_FILE_DIRTY)),
3063 K(zone_page_state(zone, NR_WRITEBACK)),
3064 K(zone_page_state(zone, NR_FILE_MAPPED)),
3065 K(zone_page_state(zone, NR_SHMEM)),
3066 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3067 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3068 zone_page_state(zone, NR_KERNEL_STACK) *
3070 K(zone_page_state(zone, NR_PAGETABLE)),
3071 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3072 K(zone_page_state(zone, NR_BOUNCE)),
3073 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3074 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3075 zone->pages_scanned,
3076 (zone->all_unreclaimable ? "yes" : "no")
3078 printk("lowmem_reserve[]:");
3079 for (i = 0; i < MAX_NR_ZONES; i++)
3080 printk(" %lu", zone->lowmem_reserve[i]);
3084 for_each_populated_zone(zone) {
3085 unsigned long nr[MAX_ORDER], flags, order, total = 0;
3086 unsigned char types[MAX_ORDER];
3088 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3091 printk("%s: ", zone->name);
3093 spin_lock_irqsave(&zone->lock, flags);
3094 for (order = 0; order < MAX_ORDER; order++) {
3095 struct free_area *area = &zone->free_area[order];
3098 nr[order] = area->nr_free;
3099 total += nr[order] << order;
3102 for (type = 0; type < MIGRATE_TYPES; type++) {
3103 if (!list_empty(&area->free_list[type]))
3104 types[order] |= 1 << type;
3107 spin_unlock_irqrestore(&zone->lock, flags);
3108 for (order = 0; order < MAX_ORDER; order++) {
3109 printk("%lu*%lukB ", nr[order], K(1UL) << order);
3111 show_migration_types(types[order]);
3113 printk("= %lukB\n", K(total));
3116 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3118 show_swap_cache_info();
3121 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3123 zoneref->zone = zone;
3124 zoneref->zone_idx = zone_idx(zone);
3128 * Builds allocation fallback zone lists.
3130 * Add all populated zones of a node to the zonelist.
3132 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3133 int nr_zones, enum zone_type zone_type)
3137 BUG_ON(zone_type >= MAX_NR_ZONES);
	do {
		zone_type--;
3142 		zone = pgdat->node_zones + zone_type;
3143 if (populated_zone(zone)) {
3144 zoneref_set_zone(zone,
3145 &zonelist->_zonerefs[nr_zones++]);
3146 check_highest_zone(zone_type);
3149 } while (zone_type);
 *  zonelist_order:
3156  *  0 = automatic detection of better ordering.
3157 * 1 = order by ([node] distance, -zonetype)
3158 * 2 = order by (-zonetype, [node] distance)
3160 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3161 * the same zonelist. So only NUMA can configure this param.
3163 #define ZONELIST_ORDER_DEFAULT 0
3164 #define ZONELIST_ORDER_NODE 1
3165 #define ZONELIST_ORDER_ZONE 2
3167 /* zonelist order in the kernel.
3168 * set_zonelist_order() will set this to NODE or ZONE.
3170 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3171 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3175 /* The value the user specified .... changed by config */
3176 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3177 /* string for sysctl */
3178 #define NUMA_ZONELIST_ORDER_LEN 16
3179 char numa_zonelist_order[16] = "default";
3182  * interface for configuring zonelist ordering.
3183  * command line option "numa_zonelist_order"
3184  *	= "[dD]efault"	- default, automatic configuration.
3185  *	= "[nN]ode"	- order by node locality, then by zone within node
3186  *	= "[zZ]one"	- order by zone, then by locality within zone
3189 static int __parse_numa_zonelist_order(char *s)
3191 if (*s == 'd' || *s == 'D') {
3192 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3193 } else if (*s == 'n' || *s == 'N') {
3194 user_zonelist_order = ZONELIST_ORDER_NODE;
3195 } else if (*s == 'z' || *s == 'Z') {
3196 user_zonelist_order = ZONELIST_ORDER_ZONE;
3199 "Ignoring invalid numa_zonelist_order value: "
3206 static __init int setup_numa_zonelist_order(char *s)
3213 ret = __parse_numa_zonelist_order(s);
3215 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3219 early_param("numa_zonelist_order", setup_numa_zonelist_order);
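/*
 * Illustrative usage (assumed, not taken from this file): the ordering can be
 * selected at boot with "numa_zonelist_order=node" (or "zone"/"default") on
 * the kernel command line, and the sysctl handler below serves the equivalent
 * runtime knob, e.g. "echo zone > /proc/sys/vm/numa_zonelist_order".
 */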
3222 * sysctl handler for numa_zonelist_order
3224 int numa_zonelist_order_handler(ctl_table *table, int write,
3225 void __user *buffer, size_t *length,
3228 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3230 static DEFINE_MUTEX(zl_order_mutex);
3232 mutex_lock(&zl_order_mutex);
3234 strcpy(saved_string, (char*)table->data);
3235 ret = proc_dostring(table, write, buffer, length, ppos);
3239 int oldval = user_zonelist_order;
3240 if (__parse_numa_zonelist_order((char*)table->data)) {
3242 * bogus value. restore saved string
3244 strncpy((char*)table->data, saved_string,
3245 NUMA_ZONELIST_ORDER_LEN);
3246 user_zonelist_order = oldval;
3247 } else if (oldval != user_zonelist_order) {
3248 mutex_lock(&zonelists_mutex);
3249 build_all_zonelists(NULL, NULL);
3250 mutex_unlock(&zonelists_mutex);
3254 mutex_unlock(&zl_order_mutex);
3259 #define MAX_NODE_LOAD (nr_online_nodes)
3260 static int node_load[MAX_NUMNODES];
3263 * find_next_best_node - find the next node that should appear in a given node's fallback list
3264 * @node: node whose fallback list we're appending
3265 * @used_node_mask: nodemask_t of already used nodes
3267 * We use a number of factors to determine which is the next node that should
3268 * appear on a given node's fallback list. The node should not have appeared
3269 * already in @node's fallback list, and it should be the next closest node
3270 * according to the distance array (which contains arbitrary distance values
3271 * from each node to each node in the system), and should also prefer nodes
3272 * with no CPUs, since presumably they'll have very little allocation pressure
3273 * on them otherwise.
3274 * It returns -1 if no node is found.
3276 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3279 int min_val = INT_MAX;
3281 const struct cpumask *tmp = cpumask_of_node(0);
3283 /* Use the local node if we haven't already */
3284 if (!node_isset(node, *used_node_mask)) {
3285 node_set(node, *used_node_mask);
3289 for_each_node_state(n, N_MEMORY) {
3291 /* Don't want a node to appear more than once */
3292 if (node_isset(n, *used_node_mask))
3295 /* Use the distance array to find the distance */
3296 val = node_distance(node, n);
3298 /* Penalize nodes under us ("prefer the next node") */
3301 /* Give preference to headless and unused nodes */
3302 tmp = cpumask_of_node(n);
3303 if (!cpumask_empty(tmp))
3304 val += PENALTY_FOR_NODE_WITH_CPUS;
3306 /* Slight preference for less loaded node */
3307 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3308 val += node_load[n];
3310 if (val < min_val) {
3317 node_set(best_node, *used_node_mask);
3324 * Build zonelists ordered by node and zones within node.
3325 * This results in maximum locality--normal zone overflows into local
3326 * DMA zone, if any--but risks exhausting DMA zone.
3328 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3331 struct zonelist *zonelist;
3333 zonelist = &pgdat->node_zonelists[0];
3334 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
3336 	j = build_zonelists_node(NODE_DATA(node), zonelist, j, MAX_NR_ZONES - 1);
3338 zonelist->_zonerefs[j].zone = NULL;
3339 zonelist->_zonerefs[j].zone_idx = 0;
3343 * Build gfp_thisnode zonelists
3345 static void build_thisnode_zonelists(pg_data_t *pgdat)
3348 struct zonelist *zonelist;
3350 zonelist = &pgdat->node_zonelists[1];
3351 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3352 zonelist->_zonerefs[j].zone = NULL;
3353 zonelist->_zonerefs[j].zone_idx = 0;
3357 * Build zonelists ordered by zone and nodes within zones.
3358 * This results in conserving DMA zone[s] until all Normal memory is
3359 * exhausted, but results in overflowing to remote node while memory
3360 * may still exist in local DMA zone.
3362 static int node_order[MAX_NUMNODES];
3364 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3367 int zone_type; /* needs to be signed */
3369 struct zonelist *zonelist;
3371 zonelist = &pgdat->node_zonelists[0];
3373 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3374 for (j = 0; j < nr_nodes; j++) {
3375 node = node_order[j];
3376 z = &NODE_DATA(node)->node_zones[zone_type];
3377 if (populated_zone(z)) {
				zoneref_set_zone(z,
3379 					&zonelist->_zonerefs[pos++]);
3380 check_highest_zone(zone_type);
3384 zonelist->_zonerefs[pos].zone = NULL;
3385 zonelist->_zonerefs[pos].zone_idx = 0;
3388 static int default_zonelist_order(void)
3391 unsigned long low_kmem_size,total_size;
3395 	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
3396 	 * If they are really small and used heavily, the system can fall
3397 	 * into OOM very easily.
3398 	 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
3400 	/* Is there ZONE_NORMAL? (e.g. ppc has only the DMA zone.) */
3403 for_each_online_node(nid) {
3404 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3405 z = &NODE_DATA(nid)->node_zones[zone_type];
3406 if (populated_zone(z)) {
3407 if (zone_type < ZONE_NORMAL)
3408 low_kmem_size += z->present_pages;
3409 total_size += z->present_pages;
3410 } else if (zone_type == ZONE_NORMAL) {
3412 * If any node has only lowmem, then node order
3413 * is preferred to allow kernel allocations
3414 * locally; otherwise, they can easily infringe
3415 * on other nodes when there is an abundance of
3416 * lowmem available to allocate from.
3418 return ZONELIST_ORDER_NODE;