swsusp: clean up shrink_all_zones()

Move local variables to innermost possible scopes and use local
variables to cache calculations/reads done more than once.

No change in functionality (intended).

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Len Brown <lenb@kernel.org>
Cc: Greg KH <gregkh@suse.de>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner and committed by Linus Torvalds
(commit 0cb57258, parent 3049103d)

+11 -12 (1 file changed)
mm/vmscan.c
··· 2057 int pass, struct scan_control *sc) 2058 { 2059 struct zone *zone; 2060 - unsigned long nr_to_scan, ret = 0; 2061 - enum lru_list l; 2062 2063 for_each_zone(zone) { 2064 2065 if (!populated_zone(zone)) 2066 continue; 2067 - 2068 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) 2069 continue; 2070 2071 for_each_evictable_lru(l) { 2072 /* For pass = 0, we don't shrink the active list */ 2073 - if (pass == 0 && 2074 - (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE)) 2075 continue; 2076 2077 - zone->lru[l].nr_scan += 2078 - (zone_page_state(zone, NR_LRU_BASE + l) 2079 - >> prio) + 1; 2080 if (zone->lru[l].nr_scan >= nr_pages || pass > 3) { 2081 zone->lru[l].nr_scan = 0; 2082 - nr_to_scan = min(nr_pages, 2083 - zone_page_state(zone, 2084 - NR_LRU_BASE + l)); 2085 ret += shrink_list(l, nr_to_scan, zone, 2086 sc, prio); 2087 if (ret >= nr_pages) ··· 2089 } 2090 } 2091 } 2092 - 2093 return ret; 2094 } 2095
··· 2057 int pass, struct scan_control *sc) 2058 { 2059 struct zone *zone; 2060 + unsigned long ret = 0; 2061 2062 for_each_zone(zone) { 2063 + enum lru_list l; 2064 2065 if (!populated_zone(zone)) 2066 continue; 2067 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) 2068 continue; 2069 2070 for_each_evictable_lru(l) { 2071 + enum zone_stat_item ls = NR_LRU_BASE + l; 2072 + unsigned long lru_pages = zone_page_state(zone, ls); 2073 + 2074 /* For pass = 0, we don't shrink the active list */ 2075 + if (pass == 0 && (l == LRU_ACTIVE_ANON || 2076 + l == LRU_ACTIVE_FILE)) 2077 continue; 2078 2079 + zone->lru[l].nr_scan += (lru_pages >> prio) + 1; 2080 if (zone->lru[l].nr_scan >= nr_pages || pass > 3) { 2081 + unsigned long nr_to_scan; 2082 + 2083 zone->lru[l].nr_scan = 0; 2084 + nr_to_scan = min(nr_pages, lru_pages); 2085 ret += shrink_list(l, nr_to_scan, zone, 2086 sc, prio); 2087 if (ret >= nr_pages) ··· 2089 } 2090 } 2091 } 2092 return ret; 2093 } 2094