struct zone *zone = lruvec_zone(lruvec);
unsigned long anon_prio, file_prio;
enum scan_balance scan_balance;
- unsigned long anon, file, free;
+ unsigned long anon, file;
bool force_scan = false;
unsigned long ap, fp;
enum lru_list lru;
file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
get_lru_size(lruvec, LRU_INACTIVE_FILE);
- /*
- * If it's foreseeable that reclaiming the file cache won't be
- * enough to get the zone back into a desirable shape, we have
- * to swap. Better start now and leave the - probably heavily
- * thrashing - remaining file pages alone.
- */
- if (global_reclaim(sc)) {
- free = zone_page_state(zone, NR_FREE_PAGES);
- if (unlikely(file + free <= high_wmark_pages(zone))) {
- scan_balance = SCAN_ANON;
- goto out;
- }
- }
-
/*
* There is enough inactive page cache, do not reclaim
* anything from the anonymous working set right now.
*/
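Taken on its own, the deleted heuristic is easy to state: if even reclaiming every file page on top of the currently free pages could not lift the zone back above its high watermark, give up on the file cache and go after anonymous memory (i.e. swap) instead. The sketch below is a minimal, standalone illustration of that test, not kernel code; the type zone_snapshot, the function should_force_anon_scan, and all the numbers are made up for the example.

#include <stdbool.h>
#include <stdio.h>

/* Simplified per-zone counters (hypothetical struct, not kernel API). */
struct zone_snapshot {
	unsigned long nr_free_pages;   /* zone_page_state(zone, NR_FREE_PAGES) */
	unsigned long nr_file_pages;   /* active + inactive file LRU pages */
	unsigned long high_wmark;      /* high_wmark_pages(zone) */
};

/* The removed heuristic: if file cache plus free pages cannot reach the
 * high watermark, balance reclaim entirely toward anonymous pages. */
static bool should_force_anon_scan(const struct zone_snapshot *z)
{
	return z->nr_file_pages + z->nr_free_pages <= z->high_wmark;
}

int main(void)
{
	/* A zone with little file cache: the old code would start swapping
	 * here regardless of how hot the remaining file pages are. */
	struct zone_snapshot z = {
		.nr_free_pages = 100,
		.nr_file_pages = 200,
		.high_wmark    = 512,
	};

	printf("force anon scan: %s\n",
	       should_force_anon_scan(&z) ? "yes" : "no");
	return 0;
}

Run against a zone with 200 file pages, 100 free pages and a high watermark of 512, it reports "yes": exactly the situation where the old code would start swapping even though the remaining file pages may be the workload's hot set. The next hunk turns to shrink_zones(), where direct reclaim decides per zone whether compaction is already viable.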
struct shrink_control shrink = {
.gfp_mask = sc->gfp_mask,
};
+ enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
/*
 * If the number of buffer_heads in the machine exceeds the maximum
 * allowed level, force direct reclaim to scan the highmem zone as
 * highmem pages could be pinning lowmem pages storing buffer_heads
 */
if (buffer_heads_over_limit)
	sc->gfp_mask |= __GFP_HIGHMEM;

/*
 * If we already have plenty of memory free for
 * compaction in this zone, don't free any more.
 * Even though compaction is invoked for any
 * non-zero order, only frequent costly order
 * reclamation is disruptive enough to become a
 * noticeable problem, like transparent huge
 * page allocations.
 */
- if (compaction_ready(zone, sc)) {
+ if ((zonelist_zone_idx(z) <= requested_highidx)
+ && compaction_ready(zone, sc)) {
aborted_reclaim = true;
continue;
}
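The extra zonelist_zone_idx(z) <= requested_highidx test guards against a subtle interaction with the __GFP_HIGHMEM promotion shown above: once reclaim widens its gfp_mask, the zonelist walk can visit zones higher than the one the allocation actually asked for, and a highmem zone that happens to be compaction-ready should not abort reclaim on behalf of a lowmem request. A minimal standalone sketch of the guarded check, under assumed names (the three-zone layout, compaction_ready_stub and may_abort_reclaim are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Simplified zone indices standing in for enum zone_type (assumed layout). */
enum { SK_ZONE_DMA = 0, SK_ZONE_NORMAL = 1, SK_ZONE_HIGHMEM = 2 };

/* Hypothetical stand-in for compaction_ready(): pretend only the highmem
 * zone already has enough free memory for compaction to succeed. */
static bool compaction_ready_stub(int zone_idx)
{
	return zone_idx == SK_ZONE_HIGHMEM;
}

/* The guarded test from the hunk above: a compaction-ready zone may only
 * abort reclaim if its index is within what the allocation requested. */
static bool may_abort_reclaim(int zone_idx, int requested_highidx)
{
	return zone_idx <= requested_highidx && compaction_ready_stub(zone_idx);
}

int main(void)
{
	/* The caller asked for ZONE_NORMAL or below; the walk still visits
	 * highmem because reclaim's gfp_mask was widened by __GFP_HIGHMEM. */
	int requested_highidx = SK_ZONE_NORMAL;

	for (int zone_idx = SK_ZONE_HIGHMEM; zone_idx >= SK_ZONE_DMA; zone_idx--)
		printf("zone %d: abort reclaim = %s\n", zone_idx,
		       may_abort_reclaim(zone_idx, requested_highidx) ? "yes" : "no");
	return 0;
}

With requested_highidx set to the normal zone, the highmem zone prints "no" even though the stub reports it compaction-ready, while a compaction-ready zone at or below the requested index would still be allowed to abort reclaim.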