1 /*
2  * linux/mm/compaction.c
3  *
4  * Memory compaction for the reduction of external fragmentation. Note that
5  * this heavily depends upon page migration to do all the real heavy
6  * lifting
7  *
8  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9  */
10 #include <linux/swap.h>
11 #include <linux/migrate.h>
12 #include <linux/compaction.h>
13 #include <linux/mm_inline.h>
14 #include <linux/backing-dev.h>
15 #include <linux/sysctl.h>
16 #include <linux/sysfs.h>
17 #include <linux/balloon_compaction.h>
18 #include <linux/page-isolation.h>
19 #include "internal.h"
20
21 #ifdef CONFIG_COMPACTION
22 static inline void count_compact_event(enum vm_event_item item)
23 {
24         count_vm_event(item);
25 }
26
27 static inline void count_compact_events(enum vm_event_item item, long delta)
28 {
29         count_vm_events(item, delta);
30 }
31 #else
32 #define count_compact_event(item) do { } while (0)
33 #define count_compact_events(item, delta) do { } while (0)
34 #endif
35
36 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/compaction.h>
40
41 static unsigned long release_freepages(struct list_head *freelist)
42 {
43         struct page *page, *next;
44         unsigned long high_pfn = 0;
45
46         list_for_each_entry_safe(page, next, freelist, lru) {
47                 unsigned long pfn = page_to_pfn(page);
48                 list_del(&page->lru);
49                 __free_page(page);
50                 if (pfn > high_pfn)
51                         high_pfn = pfn;
52         }
53
54         return high_pfn;
55 }
56
57 static void map_pages(struct list_head *list)
58 {
59         struct page *page;
60
61         list_for_each_entry(page, list, lru) {
62                 arch_alloc_page(page, 0);
63                 kernel_map_pages(page, 1, 1);
64         }
65 }
66
67 static inline bool migrate_async_suitable(int migratetype)
68 {
69         return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
70 }
71
72 /*
73  * Check that the whole (or subset of) a pageblock given by the interval of
74  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
75  * with the migration or free compaction scanner. The scanners then need to
76  * use only pfn_valid_within() check for arches that allow holes within
77  * pageblocks.
78  *
79  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
80  *
81  * It's possible on some configurations to have a setup like node0 node1 node0,
82  * i.e. it's possible that all pages within a zone's range of pages do not
83  * belong to a single zone. We assume that a border between node0 and node1
84  * can occur within a single pageblock, but not a node0 node1 node0
85  * interleaving within a single pageblock. It is therefore sufficient to check
86  * the first and last page of a pageblock and avoid checking each individual
87  * page in a pageblock.
88  */
89 static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
90                                 unsigned long end_pfn, struct zone *zone)
91 {
92         struct page *start_page;
93         struct page *end_page;
94
95         /* end_pfn is one past the range we are checking */
96         end_pfn--;
97
98         if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
99                 return NULL;
100
101         start_page = pfn_to_page(start_pfn);
102
103         if (page_zone(start_page) != zone)
104                 return NULL;
105
106         end_page = pfn_to_page(end_pfn);
107
108         /* This gives shorter code than deriving page_zone(end_page) */
109         if (page_zone_id(start_page) != page_zone_id(end_page))
110                 return NULL;
111
112         return start_page;
113 }
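/*
 * Editor's note (illustrative, not part of the original file): with
 * pageblock_nr_pages == 512 (order-9 pageblocks with 4K pages, a common
 * x86_64 configuration), a call covering [start_pfn, start_pfn + 512)
 * only has to run pfn_valid() and the zone checks on start_pfn and
 * start_pfn + 511; the per-pfn pfn_valid_within() check is left to the
 * scanners, as described above.
 */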
114
115 #ifdef CONFIG_COMPACTION
116 /* Returns true if the pageblock should be scanned for pages to isolate. */
117 static inline bool isolation_suitable(struct compact_control *cc,
118                                         struct page *page)
119 {
120         if (cc->ignore_skip_hint)
121                 return true;
122
123         return !get_pageblock_skip(page);
124 }
125
126 /*
127  * This function is called to clear all cached information on pageblocks that
128  * should be skipped for page isolation when the migrate and free page scanner
129  * meet.
130  */
131 static void __reset_isolation_suitable(struct zone *zone)
132 {
133         unsigned long start_pfn = zone->zone_start_pfn;
134         unsigned long end_pfn = zone_end_pfn(zone);
135         unsigned long pfn;
136
137         zone->compact_cached_migrate_pfn[0] = start_pfn;
138         zone->compact_cached_migrate_pfn[1] = start_pfn;
139         zone->compact_cached_free_pfn = end_pfn;
140         zone->compact_blockskip_flush = false;
141
142         /* Walk the zone and mark every pageblock as suitable for isolation */
143         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
144                 struct page *page;
145
146                 cond_resched();
147
148                 if (!pfn_valid(pfn))
149                         continue;
150
151                 page = pfn_to_page(pfn);
152                 if (zone != page_zone(page))
153                         continue;
154
155                 clear_pageblock_skip(page);
156         }
157 }
158
159 void reset_isolation_suitable(pg_data_t *pgdat)
160 {
161         int zoneid;
162
163         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
164                 struct zone *zone = &pgdat->node_zones[zoneid];
165                 if (!populated_zone(zone))
166                         continue;
167
168                 /* Only flush if a full compaction finished recently */
169                 if (zone->compact_blockskip_flush)
170                         __reset_isolation_suitable(zone);
171         }
172 }
173
174 /*
175  * If no pages were isolated then mark this pageblock to be skipped in the
176  * future. The information is later cleared by __reset_isolation_suitable().
177  */
178 static void update_pageblock_skip(struct compact_control *cc,
179                         struct page *page, unsigned long nr_isolated,
180                         bool migrate_scanner)
181 {
182         struct zone *zone = cc->zone;
183         unsigned long pfn;
184
185         if (cc->ignore_skip_hint)
186                 return;
187
188         if (!page)
189                 return;
190
191         if (nr_isolated)
192                 return;
193
194         set_pageblock_skip(page);
195
196         pfn = page_to_pfn(page);
197
198         /* Update where async and sync compaction should restart */
199         if (migrate_scanner) {
200                 if (pfn > zone->compact_cached_migrate_pfn[0])
201                         zone->compact_cached_migrate_pfn[0] = pfn;
202                 if (cc->mode != MIGRATE_ASYNC &&
203                     pfn > zone->compact_cached_migrate_pfn[1])
204                         zone->compact_cached_migrate_pfn[1] = pfn;
205         } else {
206                 if (pfn < zone->compact_cached_free_pfn)
207                         zone->compact_cached_free_pfn = pfn;
208         }
209 }
210 #else
211 static inline bool isolation_suitable(struct compact_control *cc,
212                                         struct page *page)
213 {
214         return true;
215 }
216
217 static void update_pageblock_skip(struct compact_control *cc,
218                         struct page *page, unsigned long nr_isolated,
219                         bool migrate_scanner)
220 {
221 }
222 #endif /* CONFIG_COMPACTION */
223
224 /*
225  * Compaction requires the taking of some coarse locks that are potentially
226  * very heavily contended. For async compaction, back out if the lock cannot
227  * be taken immediately. For sync compaction, spin on the lock if needed.
228  *
229  * Returns true if the lock is held
230  * Returns false if the lock is not held and compaction should abort
231  */
232 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
233                                                 struct compact_control *cc)
234 {
235         if (cc->mode == MIGRATE_ASYNC) {
236                 if (!spin_trylock_irqsave(lock, *flags)) {
237                         cc->contended = COMPACT_CONTENDED_LOCK;
238                         return false;
239                 }
240         } else {
241                 spin_lock_irqsave(lock, *flags);
242         }
243
244         return true;
245 }
246
247 /*
248  * Compaction requires the taking of some coarse locks that are potentially
249  * very heavily contended. The lock should be periodically unlocked to avoid
250  * having disabled IRQs for a long time, even when there is nobody waiting on
251  * the lock. It might also be that allowing the IRQs will result in
252  * need_resched() becoming true. If scheduling is needed, async compaction
253  * aborts. Sync compaction schedules.
254  * Either compaction type will also abort if a fatal signal is pending.
255  * In either case if the lock was locked, it is dropped and not regained.
256  *
257  * Returns true if compaction should abort due to fatal signal pending, or
258  *              async compaction due to need_resched()
259  * Returns false when compaction can continue (sync compaction might have
260  *              scheduled)
261  */
262 static bool compact_unlock_should_abort(spinlock_t *lock,
263                 unsigned long flags, bool *locked, struct compact_control *cc)
264 {
265         if (*locked) {
266                 spin_unlock_irqrestore(lock, flags);
267                 *locked = false;
268         }
269
270         if (fatal_signal_pending(current)) {
271                 cc->contended = COMPACT_CONTENDED_SCHED;
272                 return true;
273         }
274
275         if (need_resched()) {
276                 if (cc->mode == MIGRATE_ASYNC) {
277                         cc->contended = COMPACT_CONTENDED_SCHED;
278                         return true;
279                 }
280                 cond_resched();
281         }
282
283         return false;
284 }
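/*
 * Editor's note: the sketch below (not part of the original file, never
 * compiled) condenses how the two helpers above are meant to be combined
 * by the scanners later in this file: take the lock lazily with
 * compact_trylock_irqsave() and drop/recheck it every SWAP_CLUSTER_MAX
 * pfns with compact_unlock_should_abort(). The function name is made up
 * for illustration.
 */
#if 0
static unsigned long scan_block_sketch(struct compact_control *cc,
                                unsigned long pfn, unsigned long end_pfn)
{
        unsigned long flags = 0;
        bool locked = false;

        for (; pfn < end_pfn; pfn++) {
                /* Periodically drop the lock, honour signals/need_resched() */
                if (!(pfn % SWAP_CLUSTER_MAX)
                    && compact_unlock_should_abort(&cc->zone->lock, flags,
                                                                &locked, cc))
                        break;

                /* Take the zone lock as late as possible */
                if (!locked) {
                        locked = compact_trylock_irqsave(&cc->zone->lock,
                                                                &flags, cc);
                        if (!locked)
                                break;
                }

                /* ... isolate the page at pfn under the lock ... */
        }

        if (locked)
                spin_unlock_irqrestore(&cc->zone->lock, flags);

        return pfn;
}
#endif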
285
286 /*
287  * Aside from avoiding lock contention, compaction also periodically checks
288  * need_resched() and either schedules in sync compaction or aborts async
289  * compaction. This is similar to what compact_unlock_should_abort() does, but
290  * is used where no lock is concerned.
291  *
292  * Returns false when no scheduling was needed, or sync compaction scheduled.
293  * Returns true when async compaction should abort.
294  */
295 static inline bool compact_should_abort(struct compact_control *cc)
296 {
297         /* async compaction aborts if contended */
298         if (need_resched()) {
299                 if (cc->mode == MIGRATE_ASYNC) {
300                         cc->contended = COMPACT_CONTENDED_SCHED;
301                         return true;
302                 }
303
304                 cond_resched();
305         }
306
307         return false;
308 }
309
310 /* Returns true if the page is within a block suitable for migration to */
311 static bool suitable_migration_target(struct page *page)
312 {
313         /* If the page is a large free page, then disallow migration */
314         if (PageBuddy(page)) {
315                 /*
316                  * We are checking page_order without zone->lock taken. But
317                  * the only small danger is that we skip a potentially suitable
318                  * pageblock, so it's not worth checking the order for a valid range.
319                  */
320                 if (page_order_unsafe(page) >= pageblock_order)
321                         return false;
322         }
323
324         /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
325         if (migrate_async_suitable(get_pageblock_migratetype(page)))
326                 return true;
327
328         /* Otherwise skip the block */
329         return false;
330 }
331
332 /*
333  * Isolate free pages onto a private freelist. If @strict is true, will abort
334  * returning 0 on any invalid PFNs or non-free pages inside the pageblock
335  * (even though it may still end up isolating some pages).
336  */
337 static unsigned long isolate_freepages_block(struct compact_control *cc,
338                                 unsigned long *start_pfn,
339                                 unsigned long end_pfn,
340                                 struct list_head *freelist,
341                                 bool strict)
342 {
343         int nr_scanned = 0, total_isolated = 0;
344         struct page *cursor, *valid_page = NULL;
345         unsigned long flags = 0;
346         bool locked = false;
347         unsigned long blockpfn = *start_pfn;
348
349         cursor = pfn_to_page(blockpfn);
350
351         /* Isolate free pages. */
352         for (; blockpfn < end_pfn; blockpfn++, cursor++) {
353                 int isolated, i;
354                 struct page *page = cursor;
355
356                 /*
357                  * Periodically drop the lock (if held) regardless of its
358                  * contention, to give chance to IRQs. Abort if fatal signal
359                  * pending or async compaction detects need_resched()
360                  */
361                 if (!(blockpfn % SWAP_CLUSTER_MAX)
362                     && compact_unlock_should_abort(&cc->zone->lock, flags,
363                                                                 &locked, cc))
364                         break;
365
366                 nr_scanned++;
367                 if (!pfn_valid_within(blockpfn))
368                         goto isolate_fail;
369
370                 if (!valid_page)
371                         valid_page = page;
372                 if (!PageBuddy(page))
373                         goto isolate_fail;
374
375                 /*
376                  * If we already hold the lock, we can skip some rechecking.
377                  * Note that if we hold the lock now, checked_pageblock was
378                  * already set in some previous iteration (or strict is true),
379                  * so it is correct to skip the suitable migration target
380                  * recheck as well.
381                  */
382                 if (!locked) {
383                         /*
384                          * The zone lock must be held to isolate freepages.
385                          * Unfortunately this is a very coarse lock and can be
386                          * heavily contended if there are parallel allocations
387                          * or parallel compactions. For async compaction do not
388                          * spin on the lock and we acquire the lock as late as
389                          * possible.
390                          */
391                         locked = compact_trylock_irqsave(&cc->zone->lock,
392                                                                 &flags, cc);
393                         if (!locked)
394                                 break;
395
396                         /* Recheck this is a buddy page under lock */
397                         if (!PageBuddy(page))
398                                 goto isolate_fail;
399                 }
400
401                 /* Found a free page, break it into order-0 pages */
402                 isolated = split_free_page(page);
403                 total_isolated += isolated;
404                 for (i = 0; i < isolated; i++) {
405                         list_add(&page->lru, freelist);
406                         page++;
407                 }
408
409                 /* If a page was split, advance to the end of it */
410                 if (isolated) {
411                         blockpfn += isolated - 1;
412                         cursor += isolated - 1;
413                         continue;
414                 }
415
416 isolate_fail:
417                 if (strict)
418                         break;
419                 else
420                         continue;
421
422         }
423
424         /* Record how far we have got within the block */
425         *start_pfn = blockpfn;
426
427         trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
428
429         /*
430          * If strict isolation is requested by CMA then check that all the
431          * pages requested were isolated. If there were any failures, 0 is
432          * returned and CMA will fail.
433          */
434         if (strict && blockpfn < end_pfn)
435                 total_isolated = 0;
436
437         if (locked)
438                 spin_unlock_irqrestore(&cc->zone->lock, flags);
439
440         /* Update the pageblock-skip if the whole pageblock was scanned */
441         if (blockpfn == end_pfn)
442                 update_pageblock_skip(cc, valid_page, total_isolated, false);
443
444         count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
445         if (total_isolated)
446                 count_compact_events(COMPACTISOLATED, total_isolated);
447         return total_isolated;
448 }
449
450 /**
451  * isolate_freepages_range() - isolate free pages.
452  * @start_pfn: The first PFN to start isolating.
453  * @end_pfn:   The one-past-last PFN.
454  *
455  * Non-free pages, invalid PFNs, or zone boundaries within the
456  * [start_pfn, end_pfn) range are considered errors and cause the function to
457  * undo its actions and return zero.
458  *
459  * Otherwise, the function returns the one-past-the-last PFN of the isolated page
460  * (which may be greater than end_pfn if end fell in the middle of
461  * a free page).
462  */
463 unsigned long
464 isolate_freepages_range(struct compact_control *cc,
465                         unsigned long start_pfn, unsigned long end_pfn)
466 {
467         unsigned long isolated, pfn, block_end_pfn;
468         LIST_HEAD(freelist);
469
470         pfn = start_pfn;
471         block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
472
473         for (; pfn < end_pfn; pfn += isolated,
474                                 block_end_pfn += pageblock_nr_pages) {
475                 /* Protect pfn from changing by isolate_freepages_block */
476                 unsigned long isolate_start_pfn = pfn;
477
478                 block_end_pfn = min(block_end_pfn, end_pfn);
479
480                 /*
481                  * pfn could pass block_end_pfn if the isolated free page
482                  * is larger than a pageblock. In this case, adjust the
483                  * scanning range to the block that pfn now falls in.
484                  */
485                 if (pfn >= block_end_pfn) {
486                         block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
487                         block_end_pfn = min(block_end_pfn, end_pfn);
488                 }
489
490                 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
491                         break;
492
493                 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
494                                                 block_end_pfn, &freelist, true);
495
496                 /*
497                  * In strict mode, isolate_freepages_block() returns 0 if
498                  * there are any holes in the block (ie. invalid PFNs or
499                  * non-free pages).
500                  */
501                 if (!isolated)
502                         break;
503
504                 /*
505                  * If we managed to isolate pages, it is always (1 << n) *
506                  * pageblock_nr_pages for some non-negative n.  (Max order
507                  * page may span two pageblocks).
508                  */
509         }
510
511         /* split_free_page does not map the pages */
512         map_pages(&freelist);
513
514         if (pfn < end_pfn) {
515                 /* Loop terminated early, cleanup. */
516                 release_freepages(&freelist);
517                 return 0;
518         }
519
520         /* We don't use freelists for anything. */
521         return pfn;
522 }
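/*
 * Editor's note: a minimal sketch (not part of the original file, never
 * compiled) of how a CMA-style caller might drive the strict isolation
 * above; it is loosely modelled on the contiguous-allocation path in
 * mm/page_alloc.c and the function name is made up for illustration.
 */
#if 0
static int grab_free_range_sketch(unsigned long start, unsigned long end)
{
        struct compact_control cc = {
                .order = -1,                    /* not driven by an allocation order */
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,           /* strict isolation wants sync behaviour */
                .ignore_skip_hint = true,       /* pageblock skip hints do not apply */
        };

        /*
         * The real caller first migrates any in-use pages out of the range;
         * only the strict free-page isolation step is shown here. A return
         * of 0 means some page in [start, end) could not be isolated.
         */
        return isolate_freepages_range(&cc, start, end) ? 0 : -EBUSY;
}
#endif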
523
524 /* Update the number of anon and file isolated pages in the zone */
525 static void acct_isolated(struct zone *zone, struct compact_control *cc)
526 {
527         struct page *page;
528         unsigned int count[2] = { 0, };
529
530         if (list_empty(&cc->migratepages))
531                 return;
532
533         list_for_each_entry(page, &cc->migratepages, lru)
534                 count[!!page_is_file_cache(page)]++;
535
536         mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
537         mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
538 }
539
540 /* Similar to reclaim, but different enough that they don't share logic */
541 static bool too_many_isolated(struct zone *zone)
542 {
543         unsigned long active, inactive, isolated;
544
545         inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
546                                         zone_page_state(zone, NR_INACTIVE_ANON);
547         active = zone_page_state(zone, NR_ACTIVE_FILE) +
548                                         zone_page_state(zone, NR_ACTIVE_ANON);
549         isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
550                                         zone_page_state(zone, NR_ISOLATED_ANON);
551
552         return isolated > (inactive + active) / 2;
553 }
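/*
 * Editor's note (worked example, not part of the original file): with,
 * say, 3000 inactive and 5000 active file/anon pages on this zone's LRU
 * lists, the migration scanner starts throttling once more than
 * (3000 + 5000) / 2 = 4000 pages are isolated at the same time.
 */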
554
555 /**
556  * isolate_migratepages_block() - isolate all migrate-able pages within
557  *                                a single pageblock
558  * @cc:         Compaction control structure.
559  * @low_pfn:    The first PFN to isolate
560  * @end_pfn:    The one-past-the-last PFN to isolate, within same pageblock
561  * @isolate_mode: Isolation mode to be used.
562  *
563  * Isolate all pages that can be migrated from the range specified by
564  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
565  * Returns zero if there is a fatal signal pending, otherwise PFN of the
566  * first page that was not scanned (which may be less than, equal to or more
567  * than end_pfn).
568  *
569  * The pages are isolated on cc->migratepages list (not required to be empty),
570  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
571  * is neither read nor updated.
572  */
573 static unsigned long
574 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
575                         unsigned long end_pfn, isolate_mode_t isolate_mode)
576 {
577         struct zone *zone = cc->zone;
578         unsigned long nr_scanned = 0, nr_isolated = 0;
579         struct list_head *migratelist = &cc->migratepages;
580         struct lruvec *lruvec;
581         unsigned long flags = 0;
582         bool locked = false;
583         struct page *page = NULL, *valid_page = NULL;
584
585         /*
586          * Ensure that there are not too many pages isolated from the LRU
587          * list by either parallel reclaimers or compaction. If there are,
588          * delay for some time until fewer pages are isolated
589          */
590         while (unlikely(too_many_isolated(zone))) {
591                 /* async migration should just abort */
592                 if (cc->mode == MIGRATE_ASYNC)
593                         return 0;
594
595                 congestion_wait(BLK_RW_ASYNC, HZ/10);
596
597                 if (fatal_signal_pending(current))
598                         return 0;
599         }
600
601         if (compact_should_abort(cc))
602                 return 0;
603
604         /* Time to isolate some pages for migration */
605         for (; low_pfn < end_pfn; low_pfn++) {
606                 /*
607                  * Periodically drop the lock (if held) regardless of its
608                  * contention, to give chance to IRQs. Abort async compaction
609                  * if contended.
610                  */
611                 if (!(low_pfn % SWAP_CLUSTER_MAX)
612                     && compact_unlock_should_abort(&zone->lru_lock, flags,
613                                                                 &locked, cc))
614                         break;
615
616                 if (!pfn_valid_within(low_pfn))
617                         continue;
618                 nr_scanned++;
619
620                 page = pfn_to_page(low_pfn);
621
622                 if (!valid_page)
623                         valid_page = page;
624
625                 /*
626                  * Skip if free. We read page order here without zone lock
627                  * which is generally unsafe, but the race window is small and
628                  * the worst thing that can happen is that we skip some
629                  * potential isolation targets.
630                  */
631                 if (PageBuddy(page)) {
632                         unsigned long freepage_order = page_order_unsafe(page);
633
634                         /*
635                          * Without lock, we cannot be sure that what we got is
636                          * a valid page order. Consider only values in the
637                          * valid order range to prevent low_pfn overflow.
638                          */
639                         if (freepage_order > 0 && freepage_order < MAX_ORDER)
640                                 low_pfn += (1UL << freepage_order) - 1;
641                         continue;
642                 }
643
644                 /*
645                  * Check may be lockless but that's ok as we recheck later.
646                  * It's possible to migrate LRU pages and balloon pages
647                  * Skip any other type of page
648                  */
649                 if (!PageLRU(page)) {
650                         if (unlikely(balloon_page_movable(page))) {
651                                 if (balloon_page_isolate(page)) {
652                                         /* Successfully isolated */
653                                         goto isolate_success;
654                                 }
655                         }
656                         continue;
657                 }
658
659                 /*
660                  * PageLRU is set. lru_lock normally excludes isolation,
661                  * splitting and collapsing (collapsing has already happened
662                  * if PageLRU is set) but the lock is not necessarily taken
663                  * here and it is wasteful to take it just to check transhuge.
664                  * Check TransHuge without lock and skip the whole pageblock if
665                  * it's either a transhuge or hugetlbfs page, as calling
666                  * compound_order() without preventing THP from splitting the
667                  * page underneath us may return surprising results.
668                  */
669                 if (PageTransHuge(page)) {
670                         if (!locked)
671                                 low_pfn = ALIGN(low_pfn + 1,
672                                                 pageblock_nr_pages) - 1;
673                         else
674                                 low_pfn += (1 << compound_order(page)) - 1;
675
676                         continue;
677                 }
678
679                 /*
680                  * Migration will fail if an anonymous page is pinned in memory,
681                  * so avoid taking lru_lock and isolating it unnecessarily in an
682                  * admittedly racy check.
683                  */
684                 if (!page_mapping(page) &&
685                     page_count(page) > page_mapcount(page))
686                         continue;
687
688                 /* If we already hold the lock, we can skip some rechecking */
689                 if (!locked) {
690                         locked = compact_trylock_irqsave(&zone->lru_lock,
691                                                                 &flags, cc);
692                         if (!locked)
693                                 break;
694
695                         /* Recheck PageLRU and PageTransHuge under lock */
696                         if (!PageLRU(page))
697                                 continue;
698                         if (PageTransHuge(page)) {
699                                 low_pfn += (1 << compound_order(page)) - 1;
700                                 continue;
701                         }
702                 }
703
704                 lruvec = mem_cgroup_page_lruvec(page, zone);
705
706                 /* Try isolate the page */
707                 if (__isolate_lru_page(page, isolate_mode) != 0)
708                         continue;
709
710                 VM_BUG_ON_PAGE(PageTransCompound(page), page);
711
712                 /* Successfully isolated */
713                 del_page_from_lru_list(page, lruvec, page_lru(page));
714
715 isolate_success:
716                 list_add(&page->lru, migratelist);
717                 cc->nr_migratepages++;
718                 nr_isolated++;
719
720                 /* Avoid isolating too much */
721                 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
722                         ++low_pfn;
723                         break;
724                 }
725         }
726
727         /*
728          * The PageBuddy() check could have potentially brought us outside
729          * the range to be scanned.
730          */
731         if (unlikely(low_pfn > end_pfn))
732                 low_pfn = end_pfn;
733
734         if (locked)
735                 spin_unlock_irqrestore(&zone->lru_lock, flags);
736
737         /*
738          * Update the pageblock-skip information and cached scanner pfn,
739          * if the whole pageblock was scanned without isolating any page.
740          */
741         if (low_pfn == end_pfn)
742                 update_pageblock_skip(cc, valid_page, nr_isolated, true);
743
744         trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
745
746         count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
747         if (nr_isolated)
748                 count_compact_events(COMPACTISOLATED, nr_isolated);
749
750         return low_pfn;
751 }
752
753 /**
754  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
755  * @cc:        Compaction control structure.
756  * @start_pfn: The first PFN to start isolating.
757  * @end_pfn:   The one-past-last PFN.
758  *
759  * Returns zero if isolation fails fatally due to e.g. pending signal.
760  * Otherwise, the function returns the one-past-the-last PFN of the isolated page
761  * (which may be greater than end_pfn if end fell in the middle of a THP page).
762  */
763 unsigned long
764 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
765                                                         unsigned long end_pfn)
766 {
767         unsigned long pfn, block_end_pfn;
768
769         /* Scan block by block. First and last block may be incomplete */
770         pfn = start_pfn;
771         block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
772
773         for (; pfn < end_pfn; pfn = block_end_pfn,
774                                 block_end_pfn += pageblock_nr_pages) {
775
776                 block_end_pfn = min(block_end_pfn, end_pfn);
777
778                 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
779                         continue;
780
781                 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
782                                                         ISOLATE_UNEVICTABLE);
783
784                 /*
785                  * In case of fatal failure, release everything that might
786                  * have been isolated in the previous iteration, and signal
787                  * the failure back to caller.
788                  */
789                 if (!pfn) {
790                         putback_movable_pages(&cc->migratepages);
791                         cc->nr_migratepages = 0;
792                         break;
793                 }
794
795                 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
796                         break;
797         }
798         acct_isolated(cc->zone, cc);
799
800         return pfn;
801 }
802
803 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
804 #ifdef CONFIG_COMPACTION
805 /*
806  * Based on information in the current compact_control, find blocks
807  * suitable for isolating free pages from and then isolate them.
808  */
809 static void isolate_freepages(struct compact_control *cc)
810 {
811         struct zone *zone = cc->zone;
812         struct page *page;
813         unsigned long block_start_pfn;  /* start of current pageblock */
814         unsigned long isolate_start_pfn; /* exact pfn we start at */
815         unsigned long block_end_pfn;    /* end of current pageblock */
816         unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
817         int nr_freepages = cc->nr_freepages;
818         struct list_head *freelist = &cc->freepages;
819
820         /*
821          * Initialise the free scanner. The starting point is where we last
822          * successfully isolated from, zone-cached value, or the end of the
823          * zone when isolating for the first time. For looping we also need
824          * this pfn aligned down to the pageblock boundary, because we do
825          * block_start_pfn -= pageblock_nr_pages in the for loop.
826          * For ending point, take care when isolating in the last pageblock of
827          * a zone which ends in the middle of a pageblock.
828          * The low boundary is the end of the pageblock the migration scanner
829          * is using.
830          */
831         isolate_start_pfn = cc->free_pfn;
832         block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
833         block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
834                                                 zone_end_pfn(zone));
835         low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
836
837         /*
838          * Isolate free pages until enough are available to migrate the
839          * pages on cc->migratepages. We stop searching if the migrate
840          * and free page scanners meet or enough free pages are isolated.
841          */
842         for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
843                                 block_end_pfn = block_start_pfn,
844                                 block_start_pfn -= pageblock_nr_pages,
845                                 isolate_start_pfn = block_start_pfn) {
846                 unsigned long isolated;
847
848                 /*
849                  * This can iterate a massively long zone without finding any
850                  * suitable migration targets, so periodically check if we need
851                  * to schedule, or even abort async compaction.
852                  */
853                 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
854                                                 && compact_should_abort(cc))
855                         break;
856
857                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
858                                                                         zone);
859                 if (!page)
860                         continue;
861
862                 /* Check the block is suitable for migration */
863                 if (!suitable_migration_target(page))
864                         continue;
865
866                 /* If isolation recently failed, do not retry */
867                 if (!isolation_suitable(cc, page))
868                         continue;
869
870                 /* Found a block suitable for isolating free pages from. */
871                 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
872                                         block_end_pfn, freelist, false);
873                 nr_freepages += isolated;
874
875                 /*
876                  * Remember where the free scanner should restart next time,
877                  * which is where isolate_freepages_block() left off.
878                  * But if it scanned the whole pageblock, isolate_start_pfn
879                  * now points at block_end_pfn, which is the start of the next
880                  * pageblock.
881                  * In that case we will however want to restart at the start
882                  * of the previous pageblock.
883                  */
884                 cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
885                                 isolate_start_pfn :
886                                 block_start_pfn - pageblock_nr_pages;
887
888                 /*
889                  * isolate_freepages_block() might have aborted due to async
890                  * compaction being contended
891                  */
892                 if (cc->contended)
893                         break;
894         }
895
896         /* split_free_page does not map the pages */
897         map_pages(freelist);
898
899         /*
900          * If we crossed the migrate scanner, we want to keep it that way
901          * so that compact_finished() may detect this
902          */
903         if (block_start_pfn < low_pfn)
904                 cc->free_pfn = cc->migrate_pfn;
905
906         cc->nr_freepages = nr_freepages;
907 }
908
909 /*
910  * This is a migrate-callback that "allocates" freepages by taking pages
911  * from the isolated freelists in the block we are migrating to.
912  */
913 static struct page *compaction_alloc(struct page *migratepage,
914                                         unsigned long data,
915                                         int **result)
916 {
917         struct compact_control *cc = (struct compact_control *)data;
918         struct page *freepage;
919
920         /*
921          * Isolate free pages if necessary, and if we are not aborting due to
922          * contention.
923          */
924         if (list_empty(&cc->freepages)) {
925                 if (!cc->contended)
926                         isolate_freepages(cc);
927
928                 if (list_empty(&cc->freepages))
929                         return NULL;
930         }
931
932         freepage = list_entry(cc->freepages.next, struct page, lru);
933         list_del(&freepage->lru);
934         cc->nr_freepages--;
935
936         return freepage;
937 }
938
939 /*
940  * This is a migrate-callback that "frees" freepages back to the isolated
941  * freelist.  All pages on the freelist are from the same zone, so there is no
942  * special handling needed for NUMA.
943  */
944 static void compaction_free(struct page *page, unsigned long data)
945 {
946         struct compact_control *cc = (struct compact_control *)data;
947
948         list_add(&page->lru, &cc->freepages);
949         cc->nr_freepages++;
950 }
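/*
 * Editor's note: compaction_alloc() and compaction_free() above are the
 * allocate/free callbacks handed to migrate_pages() by compact_zone()
 * below, with the compact_control pointer passed through the unsigned
 * long @data argument.
 */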
951
952 /* possible outcome of isolate_migratepages */
953 typedef enum {
954         ISOLATE_ABORT,          /* Abort compaction now */
955         ISOLATE_NONE,           /* No pages isolated, continue scanning */
956         ISOLATE_SUCCESS,        /* Pages isolated, migrate */
957 } isolate_migrate_t;
958
959 /*
960  * Isolate all pages that can be migrated from the first suitable block,
961  * starting at the block pointed to by the migrate scanner pfn within
962  * compact_control.
963  */
964 static isolate_migrate_t isolate_migratepages(struct zone *zone,
965                                         struct compact_control *cc)
966 {
967         unsigned long low_pfn, end_pfn;
968         struct page *page;
969         const isolate_mode_t isolate_mode =
970                 (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
971
972         /*
973          * Start at where we last stopped, or beginning of the zone as
974          * initialized by compact_zone()
975          */
976         low_pfn = cc->migrate_pfn;
977
978         /* Only scan within a pageblock boundary */
979         end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
980
981         /*
982          * Iterate over whole pageblocks until we find the first suitable.
983          * Do not cross the free scanner.
984          */
985         for (; end_pfn <= cc->free_pfn;
986                         low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
987
988                 /*
989                  * This can potentially iterate a massively long zone with
990                  * many pageblocks unsuitable, so periodically check if we
991                  * need to schedule, or even abort async compaction.
992                  */
993                 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
994                                                 && compact_should_abort(cc))
995                         break;
996
997                 page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
998                 if (!page)
999                         continue;
1000
1001                 /* If isolation recently failed, do not retry */
1002                 if (!isolation_suitable(cc, page))
1003                         continue;
1004
1005                 /*
1006                  * For async compaction, also only scan in MOVABLE blocks.
1007                  * Async compaction is optimistic to see if the minimum amount
1008                  * of work satisfies the allocation.
1009                  */
1010                 if (cc->mode == MIGRATE_ASYNC &&
1011                     !migrate_async_suitable(get_pageblock_migratetype(page)))
1012                         continue;
1013
1014                 /* Perform the isolation */
1015                 low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
1016                                                                 isolate_mode);
1017
1018                 if (!low_pfn || cc->contended)
1019                         return ISOLATE_ABORT;
1020
1021                 /*
1022                  * Either we isolated something and proceed with migration. Or
1023                  * we failed and compact_zone should decide if we should
1024                  * continue or not.
1025                  */
1026                 break;
1027         }
1028
1029         acct_isolated(zone, cc);
1030         /*
1031          * Record where migration scanner will be restarted. If we end up in
1032          * the same pageblock as the free scanner, make the scanners fully
1033          * meet so that compact_finished() terminates compaction.
1034          */
1035         cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
1036
1037         return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1038 }
1039
1040 static int compact_finished(struct zone *zone, struct compact_control *cc,
1041                             const int migratetype)
1042 {
1043         unsigned int order;
1044         unsigned long watermark;
1045
1046         if (cc->contended || fatal_signal_pending(current))
1047                 return COMPACT_PARTIAL;
1048
1049         /* Compaction run completes if the migrate and free scanner meet */
1050         if (cc->free_pfn <= cc->migrate_pfn) {
1051                 /* Let the next compaction start anew. */
1052                 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
1053                 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
1054                 zone->compact_cached_free_pfn = zone_end_pfn(zone);
1055
1056                 /*
1057                  * Mark that the PG_migrate_skip information should be cleared
1058                  * by kswapd when it goes to sleep. kswapd does not set the
1059                  * flag itself, as the decision to clear it should be based
1060                  * directly on an allocation request.
1061                  */
1062                 if (!current_is_kswapd())
1063                         zone->compact_blockskip_flush = true;
1064
1065                 return COMPACT_COMPLETE;
1066         }
1067
1068         /*
1069          * order == -1 is expected when compacting via
1070          * /proc/sys/vm/compact_memory
1071          */
1072         if (cc->order == -1)
1073                 return COMPACT_CONTINUE;
1074
1075         /* Compaction run is not finished if the watermark is not met */
1076         watermark = low_wmark_pages(zone);
1077
1078         if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1079                                                         cc->alloc_flags))
1080                 return COMPACT_CONTINUE;
1081
1082         /* Direct compactor: Is a suitable page free? */
1083         for (order = cc->order; order < MAX_ORDER; order++) {
1084                 struct free_area *area = &zone->free_area[order];
1085
1086                 /* Job done if page is free of the right migratetype */
1087                 if (!list_empty(&area->free_list[migratetype]))
1088                         return COMPACT_PARTIAL;
1089
1090                 /* Job done if allocation would set block type */
1091                 if (cc->order >= pageblock_order && area->nr_free)
1092                         return COMPACT_PARTIAL;
1093         }
1094
1095         return COMPACT_CONTINUE;
1096 }
1097
1098 /*
1099  * compaction_suitable: Is this suitable to run compaction on this zone now?
1100  * Returns
1101  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1102  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
1103  *   COMPACT_CONTINUE - If compaction should run now
1104  */
1105 unsigned long compaction_suitable(struct zone *zone, int order,
1106                                         int alloc_flags, int classzone_idx)
1107 {
1108         int fragindex;
1109         unsigned long watermark;
1110
1111         /*
1112          * order == -1 is expected when compacting via
1113          * /proc/sys/vm/compact_memory
1114          */
1115         if (order == -1)
1116                 return COMPACT_CONTINUE;
1117
1118         watermark = low_wmark_pages(zone);
1119         /*
1120          * If watermarks for high-order allocation are already met, there
1121          * should be no need for compaction at all.
1122          */
1123         if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1124                                                                 alloc_flags))
1125                 return COMPACT_PARTIAL;
1126
1127         /*
1128          * Watermarks for order-0 must be met for compaction. Note the 2UL.
1129          * This is because during migration, copies of pages need to be
1130          * allocated and for a short time, the footprint is higher
1131          */
1132         watermark += (2UL << order);
1133         if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
1134                 return COMPACT_SKIPPED;
1135
1136         /*
1137          * fragmentation index determines if allocation failures are due to
1138          * low memory or external fragmentation
1139          *
1140          * An index of -1000 would imply allocations might succeed depending on
1141          * watermarks, but we already failed the high-order watermark check.
1142          * An index towards 0 implies failure is due to lack of memory.
1143          * An index towards 1000 implies failure is due to fragmentation.
1144          *
1145          * Only compact if a failure would be due to fragmentation.
1146          */
1147         fragindex = fragmentation_index(zone, order);
1148         if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1149                 return COMPACT_SKIPPED;
1150
1151         return COMPACT_CONTINUE;
1152 }
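/*
 * Editor's note (worked example, not part of the original file): for an
 * order-9 request (2MB with 4K pages), the order-0 check above uses
 * low_wmark_pages(zone) + (2UL << 9), i.e. the low watermark plus 1024
 * extra pages, to cover the temporary copies made while pages migrate.
 */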
1153
1154 static int compact_zone(struct zone *zone, struct compact_control *cc)
1155 {
1156         int ret;
1157         unsigned long start_pfn = zone->zone_start_pfn;
1158         unsigned long end_pfn = zone_end_pfn(zone);
1159         const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1160         const bool sync = cc->mode != MIGRATE_ASYNC;
1161         unsigned long last_migrated_pfn = 0;
1162
1163         ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1164                                                         cc->classzone_idx);
1165         switch (ret) {
1166         case COMPACT_PARTIAL:
1167         case COMPACT_SKIPPED:
1168                 /* Compaction is likely to fail */
1169                 return ret;
1170         case COMPACT_CONTINUE:
1171                 /* Fall through to compaction */
1172                 ;
1173         }
1174
1175         /*
1176          * Clear pageblock skip if there were failures recently and compaction
1177          * is about to be retried after being deferred. kswapd does not do
1178          * this reset as it'll reset the cached information when going to sleep.
1179          */
1180         if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
1181                 __reset_isolation_suitable(zone);
1182
1183         /*
1184          * Setup to move all movable pages to the end of the zone. Used cached
1185          * information on where the scanners should start but check that it
1186          * is initialised by ensuring the values are within zone boundaries.
1187          */
1188         cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1189         cc->free_pfn = zone->compact_cached_free_pfn;
1190         if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1191                 cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1192                 zone->compact_cached_free_pfn = cc->free_pfn;
1193         }
1194         if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1195                 cc->migrate_pfn = start_pfn;
1196                 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1197                 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1198         }
1199
1200         trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
1201
1202         migrate_prep_local();
1203
1204         while ((ret = compact_finished(zone, cc, migratetype)) ==
1205                                                 COMPACT_CONTINUE) {
1206                 int err;
1207                 unsigned long isolate_start_pfn = cc->migrate_pfn;
1208
1209                 switch (isolate_migratepages(zone, cc)) {
1210                 case ISOLATE_ABORT:
1211                         ret = COMPACT_PARTIAL;
1212                         putback_movable_pages(&cc->migratepages);
1213                         cc->nr_migratepages = 0;
1214                         goto out;
1215                 case ISOLATE_NONE:
1216                         /*
1217                          * We haven't isolated and migrated anything, but
1218                          * there might still be unflushed migrations from
1219                          * previous cc->order aligned block.
1220                          */
1221                         goto check_drain;
1222                 case ISOLATE_SUCCESS:
1223                         ;
1224                 }
1225
1226                 err = migrate_pages(&cc->migratepages, compaction_alloc,
1227                                 compaction_free, (unsigned long)cc, cc->mode,
1228                                 MR_COMPACTION);
1229
1230                 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1231                                                         &cc->migratepages);
1232
1233                 /* All pages were either migrated or will be released */
1234                 cc->nr_migratepages = 0;
1235                 if (err) {
1236                         putback_movable_pages(&cc->migratepages);
1237                         /*
1238                          * migrate_pages() may return -ENOMEM when scanners meet
1239                          * and we want compact_finished() to detect it
1240                          */
1241                         if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
1242                                 ret = COMPACT_PARTIAL;
1243                                 goto out;
1244                         }
1245                 }
1246
1247                 /*
1248                  * Record where we could have freed pages by migration and not
1249                  * yet flushed them to buddy allocator. We use the pfn that
1250                  * isolate_migratepages() started from in this loop iteration
1251                  * - this is the lowest page that could have been isolated and
1252                  * then freed by migration.
1253                  */
1254                 if (!last_migrated_pfn)
1255                         last_migrated_pfn = isolate_start_pfn;
1256
1257 check_drain:
1258                 /*
1259                  * Has the migration scanner moved away from the previous
1260                  * cc->order aligned block where we migrated from? If yes,
1261                  * flush the pages that were freed, so that they can merge and
1262                  * compact_finished() can detect immediately if allocation
1263                  * would succeed.
1264                  */
1265                 if (cc->order > 0 && last_migrated_pfn) {
1266                         int cpu;
1267                         unsigned long current_block_start =
1268                                 cc->migrate_pfn & ~((1UL << cc->order) - 1);
1269
1270                         if (last_migrated_pfn < current_block_start) {
1271                                 cpu = get_cpu();
1272                                 lru_add_drain_cpu(cpu);
1273                                 drain_local_pages(zone);
1274                                 put_cpu();
1275                                 /* No more flushing until we migrate again */
1276                                 last_migrated_pfn = 0;
1277                         }
1278                 }
1279
1280         }
1281
1282 out:
1283         /*
1284          * Release free pages and update where the free scanner should restart,
1285          * so we don't leave any returned pages behind in the next attempt.
1286          */
1287         if (cc->nr_freepages > 0) {
1288                 unsigned long free_pfn = release_freepages(&cc->freepages);
1289
1290                 cc->nr_freepages = 0;
1291                 VM_BUG_ON(free_pfn == 0);
1292                 /* The cached pfn is always the first in a pageblock */
1293                 free_pfn &= ~(pageblock_nr_pages-1);
1294                 /*
1295                  * Only go back, not forward. The cached pfn might have been
1296                  * already reset to zone end in compact_finished()
1297                  */
1298                 if (free_pfn > zone->compact_cached_free_pfn)
1299                         zone->compact_cached_free_pfn = free_pfn;
1300         }
1301
1302         trace_mm_compaction_end(ret);
1303
1304         return ret;
1305 }
1306
1307 static unsigned long compact_zone_order(struct zone *zone, int order,
1308                 gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1309                 int alloc_flags, int classzone_idx)
1310 {
1311         unsigned long ret;
1312         struct compact_control cc = {
1313                 .nr_freepages = 0,
1314                 .nr_migratepages = 0,
1315                 .order = order,
1316                 .gfp_mask = gfp_mask,
1317                 .zone = zone,
1318                 .mode = mode,
1319                 .alloc_flags = alloc_flags,
1320                 .classzone_idx = classzone_idx,
1321         };
1322         INIT_LIST_HEAD(&cc.freepages);
1323         INIT_LIST_HEAD(&cc.migratepages);
1324
1325         ret = compact_zone(zone, &cc);
1326
1327         VM_BUG_ON(!list_empty(&cc.freepages));
1328         VM_BUG_ON(!list_empty(&cc.migratepages));
1329
1330         *contended = cc.contended;
1331         return ret;
1332 }
1333
1334 int sysctl_extfrag_threshold = 500;
1335
1336 /**
1337  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1338  * @zonelist: The zonelist used for the current allocation
1339  * @order: The order of the current allocation
1340  * @gfp_mask: The GFP mask of the current allocation
1341  * @nodemask: The allowed nodes to allocate from
1342  * @mode: The migration mode for async, sync light, or sync migration
1343  * @contended: Return value that determines if compaction was aborted due to
1344  *             need_resched() or lock contention
1345  *
1346  * This is the main entry point for direct page compaction.
1347  */
1348 unsigned long try_to_compact_pages(struct zonelist *zonelist,
1349                         int order, gfp_t gfp_mask, nodemask_t *nodemask,
1350                         enum migrate_mode mode, int *contended,
1351                         int alloc_flags, int classzone_idx)
1352 {
1353         enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1354         int may_enter_fs = gfp_mask & __GFP_FS;
1355         int may_perform_io = gfp_mask & __GFP_IO;
1356         struct zoneref *z;
1357         struct zone *zone;
1358         int rc = COMPACT_DEFERRED;
1359         int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1360
1361         *contended = COMPACT_CONTENDED_NONE;
1362
1363         /* Check if the GFP flags allow compaction */
1364         if (!order || !may_enter_fs || !may_perform_io)
1365                 return COMPACT_SKIPPED;
1366
1367         /* Compact each zone in the list */
1368         for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1369                                                                 nodemask) {
1370                 int status;
1371                 int zone_contended;
1372
1373                 if (compaction_deferred(zone, order))
1374                         continue;
1375
1376                 status = compact_zone_order(zone, order, gfp_mask, mode,
1377                                 &zone_contended, alloc_flags, classzone_idx);
1378                 rc = max(status, rc);
1379                 /*
1380                  * It takes at least one zone that wasn't lock contended
1381                  * to clear all_zones_contended.
1382                  */
1383                 all_zones_contended &= zone_contended;
1384
1385                 /* If a normal allocation would succeed, stop compacting */
1386                 if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1387                                         classzone_idx, alloc_flags)) {
1388                         /*
1389                          * We think the allocation will succeed in this zone,
1390                          * but it is not certain, hence the false. The caller
1391                          * will repeat this with true if allocation indeed
1392                          * succeeds in this zone.
1393                          */
1394                         compaction_defer_reset(zone, order, false);
1395                         /*
1396                          * It is possible that async compaction aborted due to
1397                          * need_resched() and the watermarks were ok thanks to
1398                          * somebody else freeing memory. The allocation can
1399                          * however still fail, so we had better signal the
1400                          * need_resched() contention anyway (this will not
1401                          * prevent the allocation attempt).
1402                          */
1403                         if (zone_contended == COMPACT_CONTENDED_SCHED)
1404                                 *contended = COMPACT_CONTENDED_SCHED;
1405
1406                         goto break_loop;
1407                 }
1408
1409                 if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
1410                         /*
1411                          * We think that allocation won't succeed in this zone
1412                          * so we defer compaction there. If it ends up
1413                          * succeeding after all, it will be reset.
1414                          */
1415                         defer_compaction(zone, order);
1416                 }
1417
1418                 /*
1419                  * We might have stopped compacting due to need_resched() in
1420                  * async compaction, or due to a fatal signal detected. In that
1421                  * case do not try further zones and signal need_resched()
1422                  * contention.
1423                  */
1424                 if ((zone_contended == COMPACT_CONTENDED_SCHED)
1425                                         || fatal_signal_pending(current)) {
1426                         *contended = COMPACT_CONTENDED_SCHED;
1427                         goto break_loop;
1428                 }
1429
1430                 continue;
1431 break_loop:
1432                 /*
1433                  * We might not have tried all the zones, so be conservative
1434                  * and assume they are not all lock contended.
1435                  */
1436                 all_zones_contended = 0;
1437                 break;
1438         }
1439
1440         /*
1441          * If at least one zone wasn't deferred or skipped, we report if all
1442          * zones that were tried were lock contended.
1443          */
1444         if (rc > COMPACT_SKIPPED && all_zones_contended)
1445                 *contended = COMPACT_CONTENDED_LOCK;
1446
1447         return rc;
1448 }
1449
1450
1451 /* Compact all zones within a node */
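/*
 * The caller supplies the compact_control: compact_pgdat() passes an async
 * control with kswapd's order, compact_node() a sync control with order -1.
 */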
1452 static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1453 {
1454         int zoneid;
1455         struct zone *zone;
1456
1457         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1458
1459                 zone = &pgdat->node_zones[zoneid];
1460                 if (!populated_zone(zone))
1461                         continue;
1462
1463                 cc->nr_freepages = 0;
1464                 cc->nr_migratepages = 0;
1465                 cc->zone = zone;
1466                 INIT_LIST_HEAD(&cc->freepages);
1467                 INIT_LIST_HEAD(&cc->migratepages);
1468
1469                 if (cc->order == -1 || !compaction_deferred(zone, cc->order))
1470                         compact_zone(zone, cc);
1471
1472                 if (cc->order > 0) {
1473                         if (zone_watermark_ok(zone, cc->order,
1474                                                 low_wmark_pages(zone), 0, 0))
1475                                 compaction_defer_reset(zone, cc->order, false);
1476                 }
1477
1478                 VM_BUG_ON(!list_empty(&cc->freepages));
1479                 VM_BUG_ON(!list_empty(&cc->migratepages));
1480         }
1481 }
1482
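/*
 * compact_pgdat - lightly compact all zones of a node with async migration;
 * called from kswapd after it has been woken for a high-order allocation
 * (order == 0 requests are ignored).
 */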
1483 void compact_pgdat(pg_data_t *pgdat, int order)
1484 {
1485         struct compact_control cc = {
1486                 .order = order,
1487                 .mode = MIGRATE_ASYNC,
1488         };
1489
1490         if (!order)
1491                 return;
1492
1493         __compact_pgdat(pgdat, &cc);
1494 }
1495
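/*
 * compact_node - fully compact one node: synchronous migration, pageblock
 * skip hints ignored, and order == -1 so every zone is compacted to
 * completion regardless of watermarks.
 */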
1496 static void compact_node(int nid)
1497 {
1498         struct compact_control cc = {
1499                 .order = -1,
1500                 .mode = MIGRATE_SYNC,
1501                 .ignore_skip_hint = true,
1502         };
1503
1504         __compact_pgdat(NODE_DATA(nid), &cc);
1505 }
1506
1507 /* Compact all nodes in the system */
1508 static void compact_nodes(void)
1509 {
1510         int nid;
1511
1512         /* Flush pending updates to the LRU lists */
1513         lru_add_drain_all();
1514
1515         for_each_online_node(nid)
1516                 compact_node(nid);
1517 }
1518
1519 /* The written value is actually unused, all memory is compacted */
1520 int sysctl_compact_memory;
1521
1522 /* This is the entry point for compacting all nodes via /proc/sys/vm */
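/* e.g. "echo 1 > /proc/sys/vm/compact_memory" requests full compaction */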
1523 int sysctl_compaction_handler(struct ctl_table *table, int write,
1524                         void __user *buffer, size_t *length, loff_t *ppos)
1525 {
1526         if (write)
1527                 compact_nodes();
1528
1529         return 0;
1530 }
1531
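/*
 * Handler for /proc/sys/vm/extfrag_threshold: validates the written value
 * against the bounds declared in the sysctl table via proc_dointvec_minmax().
 */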
1532 int sysctl_extfrag_handler(struct ctl_table *table, int write,
1533                         void __user *buffer, size_t *length, loff_t *ppos)
1534 {
1535         proc_dointvec_minmax(table, write, buffer, length, ppos);
1536
1537         return 0;
1538 }
1539
1540 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
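/*
 * Writing to /sys/devices/system/node/nodeN/compact fully compacts that
 * node, same as compact_node() above.
 */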
1541 static ssize_t sysfs_compact_node(struct device *dev,
1542                         struct device_attribute *attr,
1543                         const char *buf, size_t count)
1544 {
1545         int nid = dev->id;
1546
1547         if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1548                 /* Flush pending updates to the LRU lists */
1549                 lru_add_drain_all();
1550
1551                 compact_node(nid);
1552         }
1553
1554         return count;
1555 }
1556 static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1557
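/*
 * Register/unregister hooks used by the node device code to create and
 * remove the per-node 'compact' sysfs attribute.
 */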
1558 int compaction_register_node(struct node *node)
1559 {
1560         return device_create_file(&node->dev, &dev_attr_compact);
1561 }
1562
1563 void compaction_unregister_node(struct node *node)
1564 {
1565         return device_remove_file(&node->dev, &dev_attr_compact);
1566 }
1567 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1568
1569 #endif /* CONFIG_COMPACTION */