mm: page allocator: adjust the per-cpu counter threshold when memory is low
[~shefty/rdma-dev.git] / mm / vmscan.c
index 9ca587c692748adbc715b440ff5aa8c89357c511..5da4295e7d672e8804151773e184d65fde1ddd67 100644 (file)
@@ -2143,7 +2143,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
                if (zone->all_unreclaimable)
                        continue;
 
-               if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+               if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
                                                                0, 0))
                        return 1;
        }
@@ -2230,7 +2230,7 @@ loop_again:
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
-                       if (!zone_watermark_ok(zone, order,
+                       if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
@@ -2276,7 +2276,7 @@ loop_again:
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
-                       if (!zone_watermark_ok(zone, order,
+                       if (!zone_watermark_ok_safe(zone, order,
                                        8*high_wmark_pages(zone), end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
@@ -2297,7 +2297,7 @@ loop_again:
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
 
-                       if (!zone_watermark_ok(zone, order,
+                       if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
                                /*
@@ -2305,7 +2305,7 @@ loop_again:
                                 * means that we have a GFP_ATOMIC allocation
                                 * failure risk. Hurry up!
                                 */
-                               if (!zone_watermark_ok(zone, order,
+                               if (!zone_watermark_ok_safe(zone, order,
                                            min_wmark_pages(zone), end_zone, 0))
                                        has_under_min_watermark_zone = 1;
                        } else {
@@ -2448,7 +2448,9 @@ static int kswapd(void *p)
                                 */
                                if (!sleeping_prematurely(pgdat, order, remaining)) {
                                        trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+                                       restore_pgdat_percpu_threshold(pgdat);
                                        schedule();
+                                       reduce_pgdat_percpu_threshold(pgdat);
                                } else {
                                        if (remaining)
                                                count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
@@ -2487,16 +2489,17 @@ void wakeup_kswapd(struct zone *zone, int order)
        if (!populated_zone(zone))
                return;
 
-       pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
+       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
+       pgdat = zone->zone_pgdat;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
-       trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
-       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-               return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
+       if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+               return;
+
+       trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
        wake_up_interruptible(&pgdat->kswapd_wait);
 }