/*
 *  mm/huge_memory.c
 *
 *  Copyright (C) 2009  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled so that we avoid
 * risking an increase in the memory footprint of applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it
 * applies to all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, just
 * as would have happened had the vma been large enough at page-fault
 * time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
        struct hlist_node hash;
        struct list_head mm_node;
        struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
        struct list_head mm_head;
        struct mm_slot *mm_slot;
        unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

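/*
 * Bump min_free_kbytes so that enough pageblocks stay free for the
 * anti-fragmentation heuristics to keep hugepage-sized regions
 * allocatable. Only takes effect while khugepaged is enabled.
 */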
static int set_recommended_min_free_kbytes(void)
{
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;

        if (!khugepaged_enabled())
                return 0;

        for_each_populated_zone(zone)
                nr_zones++;

        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
        recommended_min = pageblock_nr_pages * nr_zones * 2;

        /*
         * Make sure that on average at least two pageblocks are almost free
         * of another type: one for a migratetype to fall back to, and a
         * second to avoid subsequent fallbacks of other types. There are 3
         * MIGRATE_TYPES we care about.
         */
        recommended_min += pageblock_nr_pages * nr_zones *
                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

        /* never allow reserving more than 5% of the lowmem */
        recommended_min = min(recommended_min,
                              (unsigned long) nr_free_buffer_pages() / 20);
        recommended_min <<= (PAGE_SHIFT-10);

        if (recommended_min > min_free_kbytes) {
                if (user_min_free_kbytes >= 0)
                        pr_info("raising min_free_kbytes from %d to %lu "
                                "to help transparent hugepage allocations\n",
                                min_free_kbytes, recommended_min);

                min_free_kbytes = recommended_min;
        }
        setup_per_zone_wmarks();
        return 0;
}
late_initcall(set_recommended_min_free_kbytes);

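/*
 * Start or stop the khugepaged kernel thread to match the current
 * "enabled" setting. Callers other than single-threaded early init
 * serialise on khugepaged_mutex.
 */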
static int start_khugepaged(void)
{
        int err = 0;
        if (khugepaged_enabled()) {
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
                if (unlikely(IS_ERR(khugepaged_thread))) {
                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                }

                if (!list_empty(&khugepaged_scan.mm_head))
                        wake_up_interruptible(&khugepaged_wait);

                set_recommended_min_free_kbytes();
        } else if (khugepaged_thread) {
                kthread_stop(khugepaged_thread);
                khugepaged_thread = NULL;
        }

        return err;
}

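/*
 * The huge zero page is a PMD-sized page of zeroes shared read-only
 * across all mms. A refcount of 1 means only the cache itself holds
 * the page, in which case the shrinker below may free it.
 */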
static atomic_t huge_zero_refcount;
static struct page *huge_zero_page __read_mostly;

static inline bool is_huge_zero_page(struct page *page)
{
        return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

static struct page *get_huge_zero_page(void)
{
        struct page *zero_page;
retry:
        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
                return ACCESS_ONCE(huge_zero_page);

        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                        HPAGE_PMD_ORDER);
        if (!zero_page) {
                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
                return NULL;
        }
        count_vm_event(THP_ZERO_PAGE_ALLOC);
        preempt_disable();
        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                preempt_enable();
                __free_pages(zero_page, compound_order(zero_page));
                goto retry;
        }

        /* Take an additional reference here; the shrinker will put it back. */
        atomic_set(&huge_zero_refcount, 2);
        preempt_enable();
        return ACCESS_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
        /*
         * The counter should never go to zero here; only the shrinker
         * can put the last reference.
         */
        BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
{
        /* we can free the zero page only if the last reference remains */
        return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct page *zero_page = xchg(&huge_zero_page, NULL);
                BUG_ON(zero_page == NULL);
                __free_pages(zero_page, compound_order(zero_page));
                return HPAGE_PMD_NR;
        }

        return 0;
}

static struct shrinker huge_zero_page_shrinker = {
        .count_objects = shrink_huge_zero_page_count,
        .scan_objects = shrink_huge_zero_page_scan,
        .seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

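/*
 * The "enabled" and "defrag" sysfs files expose a triple state held in
 * two flag bits; the bracketed word in "[always] madvise never" marks
 * the currently active choice.
 */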
static ssize_t double_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag enabled,
                                enum transparent_hugepage_flag req_madv)
{
        if (test_bit(enabled, &transparent_hugepage_flags)) {
                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
                return sprintf(buf, "[always] madvise never\n");
        } else if (test_bit(req_madv, &transparent_hugepage_flags))
                return sprintf(buf, "always [madvise] never\n");
        else
                return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag enabled,
                                 enum transparent_hugepage_flag req_madv)
{
        if (!memcmp("always", buf,
                    min(sizeof("always")-1, count))) {
                set_bit(enabled, &transparent_hugepage_flags);
                clear_bit(req_madv, &transparent_hugepage_flags);
        } else if (!memcmp("madvise", buf,
                           min(sizeof("madvise")-1, count))) {
                clear_bit(enabled, &transparent_hugepage_flags);
                set_bit(req_madv, &transparent_hugepage_flags);
        } else if (!memcmp("never", buf,
                           min(sizeof("never")-1, count))) {
                clear_bit(enabled, &transparent_hugepage_flags);
                clear_bit(req_madv, &transparent_hugepage_flags);
        } else
                return -EINVAL;

        return count;
}

static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        return double_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_FLAG,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
                             struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        ssize_t ret;

        ret = double_flag_store(kobj, attr, buf, count,
                                TRANSPARENT_HUGEPAGE_FLAG,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

        if (ret > 0) {
                int err;

                mutex_lock(&khugepaged_mutex);
                err = start_khugepaged();
                mutex_unlock(&khugepaged_mutex);

                if (err)
                        ret = err;
        }

        return ret;
}
static struct kobj_attribute enabled_attr =
        __ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag)
{
        return sprintf(buf, "%d\n",
                       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag)
{
        unsigned long value;
        int ret;

        ret = kstrtoul(buf, 10, &value);
        if (ret < 0)
                return ret;
        if (value > 1)
                return -EINVAL;

        if (value)
                set_bit(flag, &transparent_hugepage_flags);
        else
                clear_bit(flag, &transparent_hugepage_flags);

        return count;
}

/*
 * When defrag is disabled, we only clear __GFP_WAIT from the hugepage
 * allocation mask; a blind __GFP_REPEAT would be too aggressive, as
 * it's never worth swapping tons of memory just to allocate one more
 * hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        return double_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
                            struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        return double_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
        __ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
        __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
        &use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
        &debug_cow_attr.attr,
#endif
        NULL,
};

static struct attribute_group hugepage_attr_group = {
        .attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = kstrtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_scan_sleep_millisecs = msecs;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
               scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
                                           struct kobj_attribute *attr,
                                           const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = kstrtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_alloc_sleep_millisecs = msecs;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
               alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        int err;
        unsigned long pages;

        err = kstrtoul(buf, 10, &pages);
        if (err || !pages || pages > UINT_MAX)
                return -EINVAL;

        khugepaged_pages_to_scan = pages;

        return count;
}
static struct kobj_attribute pages_to_scan_attr =
        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
               pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
        __ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
        __ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
                                       struct kobj_attribute *attr,
                                       const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
        __ATTR(defrag, 0644, khugepaged_defrag_show,
               khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, potentially increasing the memory footprint
 * of the vmas it scans. When max_ptes_none is 0, khugepaged will not
 * reduce the amount of free memory in the system as it runs; raising
 * max_ptes_none lets collapses consume more free memory during the
 * khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
                                             struct kobj_attribute *attr,
                                             char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
{
        int err;
        unsigned long max_ptes_none;

        err = kstrtoul(buf, 10, &max_ptes_none);
        if (err || max_ptes_none > HPAGE_PMD_NR-1)
                return -EINVAL;

        khugepaged_max_ptes_none = max_ptes_none;

        return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
               khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
        &khugepaged_defrag_attr.attr,
        &khugepaged_max_ptes_none_attr.attr,
        &pages_to_scan_attr.attr,
        &pages_collapsed_attr.attr,
        &full_scans_attr.attr,
        &scan_sleep_millisecs_attr.attr,
        &alloc_sleep_millisecs_attr.attr,
        NULL,
};

static struct attribute_group khugepaged_attr_group = {
        .attrs = khugepaged_attr,
        .name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        int err;

        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
                pr_err("failed to create transparent hugepage kobject\n");
                return -ENOMEM;
        }

        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
                pr_err("failed to register transparent hugepage group\n");
                goto delete_obj;
        }

        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
                pr_err("failed to register transparent hugepage khugepaged group\n");
                goto remove_hp_group;
        }

        return 0;

remove_hp_group:
        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
        kobject_put(*hugepage_kobj);
        return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
        kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
        int err;
        struct kobject *hugepage_kobj;

        if (!has_transparent_hugepage()) {
                transparent_hugepage_flags = 0;
                return -EINVAL;
        }

        err = hugepage_init_sysfs(&hugepage_kobj);
        if (err)
                return err;

        err = khugepaged_slab_init();
        if (err)
                goto out;

        register_shrinker(&huge_zero_page_shrinker);

        /*
         * By default, disable transparent hugepages on smaller systems,
         * where the extra memory used could hurt more than the TLB
         * overhead is likely to save.  The admin can still enable it
         * through /sys.
         */
        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
                transparent_hugepage_flags = 0;

        start_khugepaged();

        return 0;
out:
        hugepage_exit_sysfs(hugepage_kobj);
        return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
        int ret = 0;
        if (!str)
                goto out;
        if (!strcmp(str, "always")) {
                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
                        &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                        &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "never")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        }
out:
        if (!ret)
                pr_warn("transparent_hugepage= cannot parse, ignored\n");
        return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
        return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
        pmd_t entry;
        entry = mk_pmd(page, prot);
        entry = pmd_mkhuge(entry);
        return entry;
}

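/*
 * Charge, clear and map a freshly allocated hugepage at @haddr.
 * Returns 0 on success (including the benign race where another
 * thread populated the pmd first) or VM_FAULT_OOM when the memcg
 * charge or the page table allocation fails.
 */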
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
                                        struct page *page)
{
        struct mem_cgroup *memcg;
        pgtable_t pgtable;
        spinlock_t *ptl;

        VM_BUG_ON_PAGE(!PageCompound(page), page);

        if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
                return VM_FAULT_OOM;

        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable)) {
                mem_cgroup_cancel_charge(page, memcg);
                return VM_FAULT_OOM;
        }

        clear_huge_page(page, haddr, HPAGE_PMD_NR);
        /*
         * The memory barrier inside __SetPageUptodate makes sure that
         * clear_huge_page writes become visible before the set_pmd_at()
         * write.
         */
        __SetPageUptodate(page);

        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_none(*pmd))) {
                spin_unlock(ptl);
                mem_cgroup_cancel_charge(page, memcg);
                put_page(page);
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                page_add_new_anon_rmap(page, vma, haddr);
                mem_cgroup_commit_charge(page, memcg, false);
                lru_cache_add_active_or_unevictable(page, vma);
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                set_pmd_at(mm, haddr, pmd, entry);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                atomic_long_inc(&mm->nr_ptes);
                spin_unlock(ptl);
        }

        return 0;
}

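/*
 * Without defrag, __GFP_WAIT is dropped so that a hugepage allocation
 * on the fault path never enters direct reclaim or compaction.
 */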
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
                                              unsigned long haddr, int nd,
                                              gfp_t extra_gfp)
{
        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
                               HPAGE_PMD_ORDER, vma, haddr, nd);
}

/* Caller must hold page table lock. */
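/* Returns false if the pmd was already populated, true on success. */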
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
{
        pmd_t entry;
        if (!pmd_none(*pmd))
                return false;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        atomic_long_inc(&mm->nr_ptes);
        return true;
}

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
{
        struct page *page;
        unsigned long haddr = address & HPAGE_PMD_MASK;

        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return VM_FAULT_FALLBACK;
        if (unlikely(anon_vma_prepare(vma)))
                return VM_FAULT_OOM;
        if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
                return VM_FAULT_OOM;
        if (!(flags & FAULT_FLAG_WRITE) &&
                        transparent_hugepage_use_zero_page()) {
                spinlock_t *ptl;
                pgtable_t pgtable;
                struct page *zero_page;
                bool set;
                pgtable = pte_alloc_one(mm, haddr);
                if (unlikely(!pgtable))
                        return VM_FAULT_OOM;
                zero_page = get_huge_zero_page();
                if (unlikely(!zero_page)) {
                        pte_free(mm, pgtable);
                        count_vm_event(THP_FAULT_FALLBACK);
                        return VM_FAULT_FALLBACK;
                }
                ptl = pmd_lock(mm, pmd);
                set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
                                zero_page);
                spin_unlock(ptl);
                if (!set) {
                        pte_free(mm, pgtable);
                        put_huge_zero_page();
                }
                return 0;
        }
        page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                        vma, haddr, numa_node_id(), 0);
        if (unlikely(!page)) {
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
        if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }

        count_vm_event(THP_FAULT_ALLOC);
        return 0;
}

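/*
 * Copy a huge pmd at fork(): both the parent and the child mapping are
 * left write-protected for COW. Returns -EAGAIN when the source pmd is
 * no longer trans-huge, so the caller falls back to copying ptes.
 */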
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma)
{
        spinlock_t *dst_ptl, *src_ptl;
        struct page *src_page;
        pmd_t pmd;
        pgtable_t pgtable;
        int ret;

        ret = -ENOMEM;
        pgtable = pte_alloc_one(dst_mm, addr);
        if (unlikely(!pgtable))
                goto out;

        dst_ptl = pmd_lock(dst_mm, dst_pmd);
        src_ptl = pmd_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

        ret = -EAGAIN;
        pmd = *src_pmd;
        if (unlikely(!pmd_trans_huge(pmd))) {
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
        /*
         * While the page table lock is held, the huge zero pmd cannot be
         * under splitting: we never split the zero page itself, only the
         * pmd that maps it into a page table.
         */
        if (is_huge_zero_pmd(pmd)) {
                struct page *zero_page;
                bool set;
                /*
                 * get_huge_zero_page() will never allocate a new page here,
                 * since we already have a zero page to copy. It just takes a
                 * reference.
                 */
                zero_page = get_huge_zero_page();
                set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
                                zero_page);
                BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                ret = 0;
                goto out_unlock;
        }

        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
                pte_free(dst_mm, pgtable);

                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
                goto out;
        }
        src_page = pmd_page(pmd);
        VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
        get_page(src_page);
        page_dup_rmap(src_page);
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        atomic_long_inc(&dst_mm->nr_ptes);

        ret = 0;
out_unlock:
        spin_unlock(src_ptl);
        spin_unlock(dst_ptl);
out:
        return ret;
}

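/*
 * Mark a huge pmd young (and possibly dirty) after an access fault,
 * re-checking that the pmd has not changed under us.
 */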
void huge_pmd_set_accessed(struct mm_struct *mm,
                           struct vm_area_struct *vma,
                           unsigned long address,
                           pmd_t *pmd, pmd_t orig_pmd,
                           int dirty)
{
        spinlock_t *ptl;
        pmd_t entry;
        unsigned long haddr;

        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto unlock;

        entry = pmd_mkyoung(orig_pmd);
        haddr = address & HPAGE_PMD_MASK;
        if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
                update_mmu_cache_pmd(vma, address, pmd);

unlock:
        spin_unlock(ptl);
}

/*
 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
 * during copy_user_huge_page()'s copy_page_rep(), in case the source
 * page gets split and a tail page is freed before the copy completes.
 * Called under the pmd_lock of the checked pmd, so it is safe from
 * splitting itself.
 */
static void get_user_huge_page(struct page *page)
{
        if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
                struct page *endpage = page + HPAGE_PMD_NR;

                atomic_add(HPAGE_PMD_NR, &page->_count);
                while (++page < endpage)
                        get_huge_page_tail(page);
        } else {
                get_page(page);
        }
}

static void put_user_huge_page(struct page *page)
{
        if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
                struct page *endpage = page + HPAGE_PMD_NR;

                while (page < endpage)
                        put_page(page++);
        } else {
                put_page(page);
        }
}

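/*
 * COW fallback for when no replacement hugepage can be allocated: copy
 * the THP into HPAGE_PMD_NR individually charged small pages and remap
 * the range with a regular page table.
 */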
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmd, pmd_t orig_pmd,
                                        struct page *page,
                                        unsigned long haddr)
{
        struct mem_cgroup *memcg;
        spinlock_t *ptl;
        pgtable_t pgtable;
        pmd_t _pmd;
        int ret = 0, i;
        struct page **pages;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
                        GFP_KERNEL);
        if (unlikely(!pages)) {
                ret |= VM_FAULT_OOM;
                goto out;
        }

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
                                               __GFP_OTHER_NODE,
                                               vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
                                                   &memcg))) {
                        if (pages[i])
                                put_page(pages[i]);
                        while (--i >= 0) {
                                memcg = (void *)page_private(pages[i]);
                                set_page_private(pages[i], 0);
                                mem_cgroup_cancel_charge(pages[i], memcg);
                                put_page(pages[i]);
                        }
                        kfree(pages);
                        ret |= VM_FAULT_OOM;
                        goto out;
                }
                set_page_private(pages[i], (unsigned long)memcg);
        }

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                copy_user_highpage(pages[i], page + i,
                                   haddr + PAGE_SIZE * i, vma);
                __SetPageUptodate(pages[i]);
                cond_resched();
        }

        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
        VM_BUG_ON_PAGE(!PageHead(page), page);

        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */

        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);

        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;
                entry = mk_pte(pages[i], vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                memcg = (void *)page_private(pages[i]);
                set_page_private(pages[i], 0);
                page_add_new_anon_rmap(pages[i], vma, haddr);
                mem_cgroup_commit_charge(pages[i], memcg, false);
                lru_cache_add_active_or_unevictable(pages[i], vma);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
        kfree(pages);

        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
        spin_unlock(ptl);

        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

        ret |= VM_FAULT_WRITE;
        put_page(page);

out:
        return ret;

out_free_pages:
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                memcg = (void *)page_private(pages[i]);
                set_page_private(pages[i], 0);
                mem_cgroup_cancel_charge(pages[i], memcg);
                put_page(pages[i]);
        }
        kfree(pages);
        goto out;
}

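/*
 * Handle a write fault against a huge pmd: reuse the page in place
 * when we are the only mapper, otherwise allocate a new hugepage and
 * copy into it, splitting the mapping if the allocation fails.
 */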
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
        spinlock_t *ptl;
        int ret = 0;
        struct page *page = NULL, *new_page;
        struct mem_cgroup *memcg;
        unsigned long haddr;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        ptl = pmd_lockptr(mm, pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);
        haddr = address & HPAGE_PMD_MASK;
        if (is_huge_zero_pmd(orig_pmd))
                goto alloc;
        spin_lock(ptl);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_unlock;

        page = pmd_page(orig_pmd);
        VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
                        update_mmu_cache_pmd(vma, address, pmd);
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
        get_user_huge_page(page);
        spin_unlock(ptl);
alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                              vma, haddr, numa_node_id(), 0);
        else
                new_page = NULL;

        if (unlikely(!new_page)) {
                if (!page) {
                        split_huge_page_pmd(vma, address, pmd);
                        ret |= VM_FAULT_FALLBACK;
                } else {
                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                        pmd, orig_pmd, page, haddr);
                        if (ret & VM_FAULT_OOM) {
                                split_huge_page(page);
                                ret |= VM_FAULT_FALLBACK;
                        }
                        put_user_huge_page(page);
                }
                count_vm_event(THP_FAULT_FALLBACK);
                goto out;
        }

        if (unlikely(mem_cgroup_try_charge(new_page, mm,
                                           GFP_TRANSHUGE, &memcg))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
                        put_user_huge_page(page);
                } else
                        split_huge_page_pmd(vma, address, pmd);
                ret |= VM_FAULT_FALLBACK;
                count_vm_event(THP_FAULT_FALLBACK);
                goto out;
        }

        count_vm_event(THP_FAULT_ALLOC);

        if (!page)
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else
                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);

        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

        spin_lock(ptl);
        if (page)
                put_user_huge_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(ptl);
                mem_cgroup_cancel_charge(new_page, memcg);
                put_page(new_page);
                goto out_mn;
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                mem_cgroup_commit_charge(new_page, memcg, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
                if (!page) {
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
                        VM_BUG_ON_PAGE(!PageHead(page), page);
                        page_remove_rmap(page);
                        put_page(page);
                }
                ret |= VM_FAULT_WRITE;
        }
        spin_unlock(ptl);
out_mn:
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
        return ret;
out_unlock:
        spin_unlock(ptl);
        return ret;
}

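/*
 * follow_page() helper for huge pmds: returns the 4K subpage of the
 * THP that maps @addr. The caller must hold the pmd lock.
 */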
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
                                   unsigned int flags)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page = NULL;

        assert_spin_locked(pmd_lockptr(mm, pmd));

        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;

        /* Avoid dumping huge zero page */
        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
                return ERR_PTR(-EFAULT);

        /* Full NUMA hinting faults to serialise migration in fault paths */
        if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
                goto out;

        page = pmd_page(*pmd);
        VM_BUG_ON_PAGE(!PageHead(page), page);
        if (flags & FOLL_TOUCH) {
                pmd_t _pmd;
                /*
                 * We should set the dirty bit only for FOLL_WRITE, but
                 * for now the dirty bit in the pmd is meaningless.
                 * If the dirty bit ever becomes meaningful and we only
                 * set it for FOLL_WRITE, an atomic set_bit will be
                 * required on the pmd to set the young bit, instead of
                 * the current set_pmd_at.
                 */
                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
                if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                                          pmd, _pmd, 1))
                        update_mmu_cache_pmd(vma, addr, pmd);
        }
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();
                        if (page->mapping)
                                mlock_vma_page(page);
                        unlock_page(page);
                }
        }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON_PAGE(!PageCompound(page), page);
        if (flags & FOLL_GET)
                get_page_foll(page);

out:
        return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
        spinlock_t *ptl;
        struct anon_vma *anon_vma = NULL;
        struct page *page;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        int page_nid = -1, this_nid = numa_node_id();
        int target_nid, last_cpupid = -1;
        bool page_locked;
        bool migrated = false;
        int flags = 0;

        ptl = pmd_lock(mm, pmdp);
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;

        /*
         * If there are potential migrations, wait for completion and retry
         * without disrupting NUMA hinting information. Do not relock and
         * check_same as the page may no longer be mapped.
         */
        if (unlikely(pmd_trans_migrating(*pmdp))) {
                spin_unlock(ptl);
                wait_migrate_huge_page(vma->anon_vma, pmdp);
                goto out;
        }

        page = pmd_page(pmd);
        BUG_ON(is_huge_zero_page(page));
        page_nid = page_to_nid(page);
        last_cpupid = page_cpupid_last(page);
        count_vm_numa_event(NUMA_HINT_FAULTS);
        if (page_nid == this_nid) {
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
                flags |= TNF_FAULT_LOCAL;
        }

        /*
         * Avoid grouping on DSO/COW pages in particular and RO pages in
         * general; RO pages shouldn't hurt as much anyway, since they
         * can be in a shared cache state.
         */
        if (!pmd_write(pmd))
                flags |= TNF_NO_GROUP;

        /*
         * Acquire the page lock to serialise THP migrations but avoid dropping
         * page_table_lock if at all possible
         */
        page_locked = trylock_page(page);
        target_nid = mpol_misplaced(page, vma, haddr);
        if (target_nid == -1) {
                /* If the page was locked, there are no parallel migrations */
                if (page_locked)
                        goto clear_pmdnuma;
        }

        /* Migration could have started since the pmd_trans_migrating check */
        if (!page_locked) {
                spin_unlock(ptl);
                wait_on_page_locked(page);
                page_nid = -1;
                goto out;
        }

        /*
         * Page is misplaced. Page lock serialises migrations. Acquire
         * anon_vma to serialise splits.
         */
        get_page(page);
        spin_unlock(ptl);
        anon_vma = page_lock_anon_vma_read(page);

        /* Confirm the PMD did not change while page_table_lock was released */
        spin_lock(ptl);
        if (unlikely(!pmd_same(pmd, *pmdp))) {
                unlock_page(page);
                put_page(page);
                page_nid = -1;
                goto out_unlock;
        }

        /* Bail if we fail to protect against THP splits for any reason */
        if (unlikely(!anon_vma)) {
                put_page(page);
                page_nid = -1;
                goto clear_pmdnuma;
        }

        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and pmd_numa cleared.
         */
        spin_unlock(ptl);
        migrated = migrate_misplaced_transhuge_page(mm, vma,
                                pmdp, pmd, addr, page, target_nid);
        if (migrated) {
                flags |= TNF_MIGRATED;
                page_nid = target_nid;
        }

        goto out;
clear_pmdnuma:
        BUG_ON(!PageLocked(page));
        pmd = pmd_mknonnuma(pmd);
        set_pmd_at(mm, haddr, pmdp, pmd);
        VM_BUG_ON(pmd_numa(*pmdp));
        update_mmu_cache_pmd(vma, addr, pmdp);
        unlock_page(page);
out_unlock:
        spin_unlock(ptl);

out:
        if (anon_vma)
                page_unlock_anon_vma_read(anon_vma);

        if (page_nid != -1)
                task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);

        return 0;
}

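/*
 * Unmap a huge pmd: clear the entry, free the deposited page table and
 * drop the mapping's reference on the page (or on the huge zero page).
 */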
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
{
        spinlock_t *ptl;
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                struct page *page;
                pgtable_t pgtable;
                pmd_t orig_pmd;
                /*
                 * For architectures like ppc64 we look at the deposited
                 * pgtable when calling pmdp_get_and_clear. So do the
                 * pgtable_trans_huge_withdraw after finishing the pmdp
                 * related operations.
                 */
                orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
                if (is_huge_zero_pmd(orig_pmd)) {
                        atomic_long_dec(&tlb->mm->nr_ptes);
                        spin_unlock(ptl);
                        put_huge_zero_page();
                } else {
                        page = pmd_page(orig_pmd);
                        page_remove_rmap(page);
                        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                        VM_BUG_ON_PAGE(!PageHead(page), page);
                        atomic_long_dec(&tlb->mm->nr_ptes);
                        spin_unlock(ptl);
                        tlb_remove_page(tlb, page);
                }
                pte_free(tlb->mm, pgtable);
                ret = 1;
        }
        return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                unsigned char *vec)
{
        spinlock_t *ptl;
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                /*
                 * All logical pages in the range are present
                 * if backed by a huge page.
                 */
                spin_unlock(ptl);
                memset(vec, 1, (end - addr) >> PAGE_SHIFT);
                ret = 1;
        }

        return ret;
}

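/*
 * mremap() support: a huge pmd can be moved wholesale when both the
 * old and new addresses are PMD-aligned and the destination vma allows
 * hugepages.
 */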
int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                  unsigned long old_addr,
                  unsigned long new_addr, unsigned long old_end,
                  pmd_t *old_pmd, pmd_t *new_pmd)
{
        spinlock_t *old_ptl, *new_ptl;
        int ret = 0;
        pmd_t pmd;

        struct mm_struct *mm = vma->vm_mm;

        if ((old_addr & ~HPAGE_PMD_MASK) ||
            (new_addr & ~HPAGE_PMD_MASK) ||
            old_end - old_addr < HPAGE_PMD_SIZE ||
            (new_vma->vm_flags & VM_NOHUGEPAGE))
                goto out;

1462         /*
1463          * The destination pmd shouldn't be established; free_pgtables()
1464          * should have released it.
1465          */
1466         if (WARN_ON(!pmd_none(*new_pmd))) {
1467                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1468                 goto out;
1469         }
1470
1471         /*
1472          * We don't have to worry about the ordering of src and dst
1473          * ptlocks because exclusive mmap_sem prevents deadlock.
1474          */
1475         ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
1476         if (ret == 1) {
1477                 new_ptl = pmd_lockptr(mm, new_pmd);
1478                 if (new_ptl != old_ptl)
1479                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1480                 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1481                 VM_BUG_ON(!pmd_none(*new_pmd));
1482
1483                 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1484                         pgtable_t pgtable;
1485                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1486                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1487                 }
1488                 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1489                 if (new_ptl != old_ptl)
1490                         spin_unlock(new_ptl);
1491                 spin_unlock(old_ptl);
1492         }
1493 out:
1494         return ret;
1495 }
1496
1497 /*
1498  * Returns
1499  *  - 0 if PMD could not be locked
1500  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1501  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1502  */
1503 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1504                 unsigned long addr, pgprot_t newprot, int prot_numa)
1505 {
1506         struct mm_struct *mm = vma->vm_mm;
1507         spinlock_t *ptl;
1508         int ret = 0;
1509
1510         if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1511                 pmd_t entry;
1512                 ret = 1;
1513                 if (!prot_numa) {
1514                         entry = pmdp_get_and_clear(mm, addr, pmd);
1515                         if (pmd_numa(entry))
1516                                 entry = pmd_mknonnuma(entry);
1517                         entry = pmd_modify(entry, newprot);
1518                         ret = HPAGE_PMD_NR;
1519                         set_pmd_at(mm, addr, pmd, entry);
1520                         BUG_ON(pmd_write(entry));
1521                 } else {
1522                         struct page *page = pmd_page(*pmd);
1523
1524                         /*
1525                          * Do not trap faults against the zero page. The
1526                          * read-only data is likely to be read-cached on the
1527                          * local CPU cache and it is less useful to know about
1528                          * local vs remote hits on the zero page.
1529                          */
1530                         if (!is_huge_zero_page(page) &&
1531                             !pmd_numa(*pmd)) {
1532                                 pmdp_set_numa(mm, addr, pmd);
1533                                 ret = HPAGE_PMD_NR;
1534                         }
1535                 }
1536                 spin_unlock(ptl);
1537         }
1538
1539         return ret;
1540 }
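
/*
 * Illustrative caller sketch, not from the original source: this is
 * roughly how an mprotect-style walker could consume the return value
 * convention documented above. change_prot_one_pmd() is a hypothetical
 * name, not a real kernel function.
 */
#if 0
static unsigned long change_prot_one_pmd(struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long addr, pgprot_t newprot)
{
	int nr = change_huge_pmd(vma, pmd, addr, newprot, 0);

	if (nr == HPAGE_PMD_NR)
		return nr;	/* protections changed: TLB flush needed */
	if (nr == 1)
		return 0;	/* huge pmd handled, but no flush required */
	return 0;		/* nr == 0: not huge, fall back to pte level */
}
#endif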
1541
1542 /*
1543  * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1544  * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1545  *
1546  * Note that if it returns 1, this routine returns without unlocking the
1547  * page table lock, so the caller must unlock it.
1548  */
1549 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1550                 spinlock_t **ptl)
1551 {
1552         *ptl = pmd_lock(vma->vm_mm, pmd);
1553         if (likely(pmd_trans_huge(*pmd))) {
1554                 if (unlikely(pmd_trans_splitting(*pmd))) {
1555                         spin_unlock(*ptl);
1556                         wait_split_huge_page(vma->anon_vma, pmd);
1557                         return -1;
1558                 } else {
1559                         /* Thp mapped by 'pmd' is stable, so we can
1560                          * handle it as it is. */
1561                         return 1;
1562                 }
1563         }
1564         spin_unlock(*ptl);
1565         return 0;
1566 }
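
/*
 * Usage sketch, not from the original source: the calling convention
 * above is the pattern every user in this file follows (compare
 * zap_huge_pmd() and mincore_huge_pmd() earlier).
 */
#if 0
	spinlock_t *ptl;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/* stable THP: ptl is held, operate on the huge pmd here */
		spin_unlock(ptl);	/* the caller must drop the lock */
	}
	/*
	 * On -1 the split has been waited for, on 0 the pmd is not huge;
	 * in both cases the lock is already dropped and the caller falls
	 * back to the pte-level path.
	 */
#endif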
1567
1568 /*
1569  * This function returns whether a given @page is mapped onto the @address
1570  * in the virtual space of @mm.
1571  *
1572  * When the mapping exists, this function returns *pmd while holding the page
1573  * table lock, passing the lock back to the caller via @ptl.
1574  * Otherwise it returns NULL without holding the page table lock.
1575  */
1576 pmd_t *page_check_address_pmd(struct page *page,
1577                               struct mm_struct *mm,
1578                               unsigned long address,
1579                               enum page_check_address_pmd_flag flag,
1580                               spinlock_t **ptl)
1581 {
1582         pgd_t *pgd;
1583         pud_t *pud;
1584         pmd_t *pmd;
1585
1586         if (address & ~HPAGE_PMD_MASK)
1587                 return NULL;
1588
1589         pgd = pgd_offset(mm, address);
1590         if (!pgd_present(*pgd))
1591                 return NULL;
1592         pud = pud_offset(pgd, address);
1593         if (!pud_present(*pud))
1594                 return NULL;
1595         pmd = pmd_offset(pud, address);
1596
1597         *ptl = pmd_lock(mm, pmd);
1598         if (!pmd_present(*pmd))
1599                 goto unlock;
1600         if (pmd_page(*pmd) != page)
1601                 goto unlock;
1602         /*
1603          * split_vma() may create temporary aliased mappings. There is
1604          * no risk as long as all huge pmds are found and have their
1605          * splitting bit set before __split_huge_page_refcount
1606          * runs. Finding the same huge pmd more than once during the
1607          * same rmap walk is not a problem.
1608          */
1609         if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1610             pmd_trans_splitting(*pmd))
1611                 goto unlock;
1612         if (pmd_trans_huge(*pmd)) {
1613                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1614                           !pmd_trans_splitting(*pmd));
1615                 return pmd;
1616         }
1617 unlock:
1618         spin_unlock(*ptl);
1619         return NULL;
1620 }
1621
1622 static int __split_huge_page_splitting(struct page *page,
1623                                        struct vm_area_struct *vma,
1624                                        unsigned long address)
1625 {
1626         struct mm_struct *mm = vma->vm_mm;
1627         spinlock_t *ptl;
1628         pmd_t *pmd;
1629         int ret = 0;
1630         /* For mmu_notifiers */
1631         const unsigned long mmun_start = address;
1632         const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
1633
1634         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1635         pmd = page_check_address_pmd(page, mm, address,
1636                         PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
1637         if (pmd) {
1638                 /*
1639                  * We can't temporarily set the pmd to null in order
1640                  * to split it, the pmd must remain marked huge at all
1641                  * times or the VM won't take the pmd_trans_huge paths
1642                  * and it won't wait on the anon_vma->root->rwsem to
1643                  * serialize against split_huge_page*.
1644                  */
1645                 pmdp_splitting_flush(vma, address, pmd);
1646                 ret = 1;
1647                 spin_unlock(ptl);
1648         }
1649         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1650
1651         return ret;
1652 }
1653
1654 static void __split_huge_page_refcount(struct page *page,
1655                                        struct list_head *list)
1656 {
1657         int i;
1658         struct zone *zone = page_zone(page);
1659         struct lruvec *lruvec;
1660         int tail_count = 0;
1661
1662         /* prevent PageLRU from going away from under us, and freeze lru stats */
1663         spin_lock_irq(&zone->lru_lock);
1664         lruvec = mem_cgroup_page_lruvec(page, zone);
1665
1666         compound_lock(page);
1667         /* complete the memcg work before adding pages to the LRU */
1668         mem_cgroup_split_huge_fixup(page);
1669
1670         for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1671                 struct page *page_tail = page + i;
1672
1673                 /* tail_page->_mapcount cannot change */
1674                 BUG_ON(page_mapcount(page_tail) < 0);
1675                 tail_count += page_mapcount(page_tail);
1676                 /* check for overflow */
1677                 BUG_ON(tail_count < 0);
1678                 BUG_ON(atomic_read(&page_tail->_count) != 0);
1679                 /*
1680                  * tail_page->_count is zero and not changing from
1681                  * under us. But get_page_unless_zero() may be running
1682                  * from under us on the tail_page. If we used
1683                  * atomic_set() below instead of atomic_add(), we
1684                  * would then run atomic_set() concurrently with
1685                  * get_page_unless_zero(), and atomic_set() is
1686                  * implemented in C not using locked ops. spin_unlock
1687                  * on x86 sometime uses locked ops because of PPro
1688                  * on x86 sometimes uses locked ops because of PPro
1689                  * atomic_set() here would be safe on all archs (and
1690                  * not only on x86), it's safer to use atomic_add().
1691                  */
1692                 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1693                            &page_tail->_count);
1694
1695                 /* after clearing PageTail the gup refcount can be released */
1696                 smp_mb__after_atomic();
1697
1698                 /*
1699                  * retain the hwpoison flag of the poisoned tail page:
1700                  *   this fixes the wrong process being killed on a guest
1701                  *   machine (KVM) by the memory-failure handler.
1702                  */
1703                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1704                 page_tail->flags |= (page->flags &
1705                                      ((1L << PG_referenced) |
1706                                       (1L << PG_swapbacked) |
1707                                       (1L << PG_mlocked) |
1708                                       (1L << PG_uptodate) |
1709                                       (1L << PG_active) |
1710                                       (1L << PG_unevictable)));
1711                 page_tail->flags |= (1L << PG_dirty);
1712
1713                 /* clear PageTail before overwriting first_page */
1714                 smp_wmb();
1715
1716                 /*
1717                  * __split_huge_page_splitting() already set the
1718                  * splitting bit in all pmds that could map this
1719                  * hugepage, which ensures no CPU can alter the
1720                  * mapcount on the head page. The mapcount is only
1721                  * accounted in the head page and it has to be
1722                  * transferred to all tail pages in the code below, so
1723                  * for this code to be safe the mapcount can't change
1724                  * during the split. But that doesn't mean userland
1725                  * can't keep changing and reading the page contents
1726                  * while we transfer the mapcount, so the pmd splitting
1727                  * status is achieved by setting a reserved bit in the
1728                  * pmd, not by clearing the present bit.
1729                  */
1730                 page_tail->_mapcount = page->_mapcount;
1731
1732                 BUG_ON(page_tail->mapping);
1733                 page_tail->mapping = page->mapping;
1734
1735                 page_tail->index = page->index + i;
1736                 page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
1737
1738                 BUG_ON(!PageAnon(page_tail));
1739                 BUG_ON(!PageUptodate(page_tail));
1740                 BUG_ON(!PageDirty(page_tail));
1741                 BUG_ON(!PageSwapBacked(page_tail));
1742
1743                 lru_add_page_tail(page, page_tail, lruvec, list);
1744         }
1745         atomic_sub(tail_count, &page->_count);
1746         BUG_ON(atomic_read(&page->_count) <= 0);
1747
1748         __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
1749
1750         ClearPageCompound(page);
1751         compound_unlock(page);
1752         spin_unlock_irq(&zone->lru_lock);
1753
1754         for (i = 1; i < HPAGE_PMD_NR; i++) {
1755                 struct page *page_tail = page + i;
1756                 BUG_ON(page_count(page_tail) <= 0);
1757                 /*
1758                  * Tail pages may be freed if there wasn't any mapping,
1759                  * e.g. if add_to_swap() is running on an LRU page that
1760                  * had its mapping zapped. Freeing these pages
1761                  * requires taking the lru_lock, so we do the put_page
1762                  * of the tail pages after the split is complete.
1763                  */
1764                 put_page(page_tail);
1765         }
1766
1767         /*
1768          * Only the head page (now a regular page) is required
1769          * to be pinned by the caller.
1770          */
1771         BUG_ON(page_count(page) <= 0);
1772 }
1773
1774 static int __split_huge_page_map(struct page *page,
1775                                  struct vm_area_struct *vma,
1776                                  unsigned long address)
1777 {
1778         struct mm_struct *mm = vma->vm_mm;
1779         spinlock_t *ptl;
1780         pmd_t *pmd, _pmd;
1781         int ret = 0, i;
1782         pgtable_t pgtable;
1783         unsigned long haddr;
1784
1785         pmd = page_check_address_pmd(page, mm, address,
1786                         PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
1787         if (pmd) {
1788                 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1789                 pmd_populate(mm, &_pmd, pgtable);
1790                 if (pmd_write(*pmd))
1791                         BUG_ON(page_mapcount(page) != 1);
1792
1793                 haddr = address;
1794                 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1795                         pte_t *pte, entry;
1796                         BUG_ON(PageCompound(page+i));
1797                         /*
1798                          * Note that pmd_numa is not transferred deliberately
1799                          * to avoid any possibility that pte_numa leaks to
1800                          * a PROT_NONE VMA by accident.
1801                          */
1802                         entry = mk_pte(page + i, vma->vm_page_prot);
1803                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1804                         if (!pmd_write(*pmd))
1805                                 entry = pte_wrprotect(entry);
1806                         if (!pmd_young(*pmd))
1807                                 entry = pte_mkold(entry);
1808                         pte = pte_offset_map(&_pmd, haddr);
1809                         BUG_ON(!pte_none(*pte));
1810                         set_pte_at(mm, haddr, pte, entry);
1811                         pte_unmap(pte);
1812                 }
1813
1814                 smp_wmb(); /* make pte visible before pmd */
1815                 /*
1816                  * Up to this point the pmd is present and huge and
1817                  * userland has full access to the hugepage
1818                  * during the split (which happens in place). If we
1819                  * overwrite the pmd with the not-huge version
1820                  * pointing to the pte here (which of course we could
1821                  * if all CPUs were bug free), userland could trigger
1822                  * a small page size TLB miss on the small sized TLB
1823                  * while the hugepage TLB entry is still established
1824                  * in the huge TLB. Some CPUs don't like that. See
1825                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1826                  * Erratum 383 on page 93. Intel should be safe but
1827                  * also warns that it's only safe if the permission
1828                  * and cache attributes of the two entries loaded in
1829                  * the two TLBs are identical (which should be the case
1830                  * here). But it is generally safer to never allow
1831                  * small and huge TLB entries for the same virtual
1832                  * address to be loaded simultaneously. So instead of
1833                  * doing "pmd_populate(); flush_tlb_range();" we first
1834                  * mark the current pmd not present (atomically because
1835                  * here the pmd_trans_huge and pmd_trans_splitting
1836                  * must remain set at all times on the pmd until the
1837                  * split is complete for this pmd), then we flush the
1838                  * SMP TLB and finally we write the non-huge version
1839                  * of the pmd entry with pmd_populate.
1840                  */
1841                 pmdp_invalidate(vma, address, pmd);
1842                 pmd_populate(mm, pmd, pgtable);
1843                 ret = 1;
1844                 spin_unlock(ptl);
1845         }
1846
1847         return ret;
1848 }
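
/*
 * Sketch of the ordering contract described in the long comment above;
 * illustrative only, not code from this file.
 */
#if 0
	/*
	 * WRONG on CPUs with AMD Erratum 383: writing the pte-table pmd
	 * before flushing lets huge and small TLB entries for the same
	 * virtual address coexist briefly.
	 */
	pmd_populate(mm, pmd, pgtable);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * RIGHT (what __split_huge_page_map() does): mark the pmd not
	 * present and flush first, then write the non-huge version.
	 */
	pmdp_invalidate(vma, address, pmd);
	pmd_populate(mm, pmd, pgtable);
#endif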
1849
1850 /* must be called with anon_vma->root->rwsem held */
1851 static void __split_huge_page(struct page *page,
1852                               struct anon_vma *anon_vma,
1853                               struct list_head *list)
1854 {
1855         int mapcount, mapcount2;
1856         pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1857         struct anon_vma_chain *avc;
1858
1859         BUG_ON(!PageHead(page));
1860         BUG_ON(PageTail(page));
1861
1862         mapcount = 0;
1863         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1864                 struct vm_area_struct *vma = avc->vma;
1865                 unsigned long addr = vma_address(page, vma);
1866                 BUG_ON(is_vma_temporary_stack(vma));
1867                 mapcount += __split_huge_page_splitting(page, vma, addr);
1868         }
1869         /*
1870          * It is critical that new vmas are added to the tail of the
1871          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1872          * and establishes a child pmd before
1873          * __split_huge_page_splitting() freezes the parent pmd (so if
1874          * we fail to prevent copy_huge_pmd() from running until the
1875          * whole __split_huge_page() is complete), we will still see
1876          * the newly established pmd of the child later during the
1877          * walk, to be able to set it as pmd_trans_splitting too.
1878          */
1879         if (mapcount != page_mapcount(page)) {
1880                 pr_err("mapcount %d page_mapcount %d\n",
1881                         mapcount, page_mapcount(page));
1882                 BUG();
1883         }
1884
1885         __split_huge_page_refcount(page, list);
1886
1887         mapcount2 = 0;
1888         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1889                 struct vm_area_struct *vma = avc->vma;
1890                 unsigned long addr = vma_address(page, vma);
1891                 BUG_ON(is_vma_temporary_stack(vma));
1892                 mapcount2 += __split_huge_page_map(page, vma, addr);
1893         }
1894         if (mapcount != mapcount2) {
1895                 pr_err("mapcount %d mapcount2 %d page_mapcount %d\n",
1896                         mapcount, mapcount2, page_mapcount(page));
1897                 BUG();
1898         }
1899 }
1900
1901 /*
1902  * Split a hugepage into normal pages. This doesn't change the position of the
1903  * head page. If @list is null, tail pages will be added to the LRU list,
1904  * otherwise to @list. Both the head page and the tail pages will inherit the
1905  * mapping, flags, and so on from the hugepage.
1906  * Returns 0 if the hugepage is split successfully, otherwise returns 1.
1907  */
1908 int split_huge_page_to_list(struct page *page, struct list_head *list)
1909 {
1910         struct anon_vma *anon_vma;
1911         int ret = 1;
1912
1913         BUG_ON(is_huge_zero_page(page));
1914         BUG_ON(!PageAnon(page));
1915
1916         /*
1917          * The caller does not necessarily hold an mmap_sem that would prevent
1918          * the anon_vma disappearing, so we first take a reference to it
1919          * and then lock the anon_vma for write. This is similar to
1920          * page_lock_anon_vma_read except the write lock is taken to serialise
1921          * against parallel split or collapse operations.
1922          */
1923         anon_vma = page_get_anon_vma(page);
1924         if (!anon_vma)
1925                 goto out;
1926         anon_vma_lock_write(anon_vma);
1927
1928         ret = 0;
1929         if (!PageCompound(page))
1930                 goto out_unlock;
1931
1932         BUG_ON(!PageSwapBacked(page));
1933         __split_huge_page(page, anon_vma, list);
1934         count_vm_event(THP_SPLIT);
1935
1936         BUG_ON(PageCompound(page));
1937 out_unlock:
1938         anon_vma_unlock_write(anon_vma);
1939         put_anon_vma(anon_vma);
1940 out:
1941         return ret;
1942 }
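
/*
 * For context: as far as I recall, the split_huge_page() used elsewhere
 * in this file is a thin wrapper in include/linux/huge_mm.h that passes
 * a NULL @list, so tail pages simply go back to the LRU. Sketch, not a
 * verbatim copy:
 */
#if 0
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
#endif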
1943
1944 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
1945
1946 int hugepage_madvise(struct vm_area_struct *vma,
1947                      unsigned long *vm_flags, int advice)
1948 {
1949         switch (advice) {
1950         case MADV_HUGEPAGE:
1951 #ifdef CONFIG_S390
1952                 /*
1953                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1954                  * can't handle this properly after s390_enable_sie, so we simply
1955                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
1956                  */
1957                 if (mm_has_pgste(vma->vm_mm))
1958                         return 0;
1959 #endif
1960                 /*
1961                  * Be somewhat over-protective like KSM for now!
1962                  */
1963                 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1964                         return -EINVAL;
1965                 *vm_flags &= ~VM_NOHUGEPAGE;
1966                 *vm_flags |= VM_HUGEPAGE;
1967                 /*
1968                  * If the vma becomes suitable for khugepaged to scan,
1969                  * register it here without waiting for a page fault that
1970                  * may not happen any time soon.
1971                  */
1972                 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1973                         return -ENOMEM;
1974                 break;
1975         case MADV_NOHUGEPAGE:
1976                 /*
1977                  * Be somewhat over-protective like KSM for now!
1978                  */
1979                 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1980                         return -EINVAL;
1981                 *vm_flags &= ~VM_HUGEPAGE;
1982                 *vm_flags |= VM_NOHUGEPAGE;
1983                 /*
1984                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1985                  * this vma even if we leave the mm registered in khugepaged if
1986                  * it got registered before VM_NOHUGEPAGE was set.
1987                  */
1988                 break;
1989         }
1990
1991         return 0;
1992 }
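
/*
 * Illustrative userspace counterpart, not kernel code: this madvise()
 * call is what eventually reaches hugepage_madvise() above and sets
 * VM_HUGEPAGE on the backing vma.
 */
#if 0
#include <sys/mman.h>

	size_t len = 16UL << 20;	/* 16MB */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf != MAP_FAILED)
		madvise(buf, len, MADV_HUGEPAGE);
#endif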
1993
1994 static int __init khugepaged_slab_init(void)
1995 {
1996         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1997                                           sizeof(struct mm_slot),
1998                                           __alignof__(struct mm_slot), 0, NULL);
1999         if (!mm_slot_cache)
2000                 return -ENOMEM;
2001
2002         return 0;
2003 }
2004
2005 static inline struct mm_slot *alloc_mm_slot(void)
2006 {
2007         if (!mm_slot_cache)     /* initialization failed */
2008                 return NULL;
2009         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
2010 }
2011
2012 static inline void free_mm_slot(struct mm_slot *mm_slot)
2013 {
2014         kmem_cache_free(mm_slot_cache, mm_slot);
2015 }
2016
2017 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
2018 {
2019         struct mm_slot *mm_slot;
2020
2021         hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
2022                 if (mm == mm_slot->mm)
2023                         return mm_slot;
2024
2025         return NULL;
2026 }
2027
2028 static void insert_to_mm_slots_hash(struct mm_struct *mm,
2029                                     struct mm_slot *mm_slot)
2030 {
2031         mm_slot->mm = mm;
2032         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
2033 }
2034
2035 static inline int khugepaged_test_exit(struct mm_struct *mm)
2036 {
2037         return atomic_read(&mm->mm_users) == 0;
2038 }
2039
2040 int __khugepaged_enter(struct mm_struct *mm)
2041 {
2042         struct mm_slot *mm_slot;
2043         int wakeup;
2044
2045         mm_slot = alloc_mm_slot();
2046         if (!mm_slot)
2047                 return -ENOMEM;
2048
2049         /* __khugepaged_exit() must not run from under us */
2050         VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
2051         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2052                 free_mm_slot(mm_slot);
2053                 return 0;
2054         }
2055
2056         spin_lock(&khugepaged_mm_lock);
2057         insert_to_mm_slots_hash(mm, mm_slot);
2058         /*
2059          * Insert just behind the scanning cursor, to let the area settle
2060          * down a little.
2061          */
2062         wakeup = list_empty(&khugepaged_scan.mm_head);
2063         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
2064         spin_unlock(&khugepaged_mm_lock);
2065
2066         atomic_inc(&mm->mm_count);
2067         if (wakeup)
2068                 wake_up_interruptible(&khugepaged_wait);
2069
2070         return 0;
2071 }
2072
2073 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
2074                                unsigned long vm_flags)
2075 {
2076         unsigned long hstart, hend;
2077         if (!vma->anon_vma)
2078                 /*
2079                  * Not yet faulted in so we will register later in the
2080                  * page fault if needed.
2081                  */
2082                 return 0;
2083         if (vma->vm_ops)
2084                 /* khugepaged not yet working on file or special mappings */
2085                 return 0;
2086         VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
2087         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2088         hend = vma->vm_end & HPAGE_PMD_MASK;
2089         if (hstart < hend)
2090                 return khugepaged_enter(vma, vm_flags);
2091         return 0;
2092 }
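
/*
 * Worked example of the rounding above, assuming 2MB HPAGE_PMD_SIZE
 * (addresses invented for illustration):
 *
 *   vm_start = 0x1ff000, vm_end = 0x601000
 *   hstart   = (0x1ff000 + 0x1fffff) & ~0x1fffff = 0x200000
 *   hend     =  0x601000             & ~0x1fffff = 0x600000
 *
 * hstart < hend, so two pmd-sized ranges fit and the mm is registered.
 * A vma that spans no aligned 2MB range yields hstart >= hend and is
 * left unregistered.
 */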
2093
2094 void __khugepaged_exit(struct mm_struct *mm)
2095 {
2096         struct mm_slot *mm_slot;
2097         int free = 0;
2098
2099         spin_lock(&khugepaged_mm_lock);
2100         mm_slot = get_mm_slot(mm);
2101         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
2102                 hash_del(&mm_slot->hash);
2103                 list_del(&mm_slot->mm_node);
2104                 free = 1;
2105         }
2106         spin_unlock(&khugepaged_mm_lock);
2107
2108         if (free) {
2109                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2110                 free_mm_slot(mm_slot);
2111                 mmdrop(mm);
2112         } else if (mm_slot) {
2113                 /*
2114                  * This is required to serialize against
2115                  * khugepaged_test_exit() (which is guaranteed to run
2116                  * under mmap_sem read mode). Stop here (after we
2117                  * return, all pagetables will be destroyed) until
2118                  * khugepaged has finished working on the pagetables
2119                  * under the mmap_sem.
2120                  */
2121                 down_write(&mm->mmap_sem);
2122                 up_write(&mm->mmap_sem);
2123         }
2124 }
2125
2126 static void release_pte_page(struct page *page)
2127 {
2128         /* 0 stands for page_is_file_cache(page) == false */
2129         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2130         unlock_page(page);
2131         putback_lru_page(page);
2132 }
2133
2134 static void release_pte_pages(pte_t *pte, pte_t *_pte)
2135 {
2136         while (--_pte >= pte) {
2137                 pte_t pteval = *_pte;
2138                 if (!pte_none(pteval))
2139                         release_pte_page(pte_page(pteval));
2140         }
2141 }
2142
2143 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2144                                         unsigned long address,
2145                                         pte_t *pte)
2146 {
2147         struct page *page;
2148         pte_t *_pte;
2149         int referenced = 0, none = 0;
2150         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2151              _pte++, address += PAGE_SIZE) {
2152                 pte_t pteval = *_pte;
2153                 if (pte_none(pteval)) {
2154                         if (++none <= khugepaged_max_ptes_none)
2155                                 continue;
2156                         else
2157                                 goto out;
2158                 }
2159                 if (!pte_present(pteval) || !pte_write(pteval))
2160                         goto out;
2161                 page = vm_normal_page(vma, address, pteval);
2162                 if (unlikely(!page))
2163                         goto out;
2164
2165                 VM_BUG_ON_PAGE(PageCompound(page), page);
2166                 VM_BUG_ON_PAGE(!PageAnon(page), page);
2167                 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2168
2169                 /* cannot use mapcount: can't collapse if there's a gup pin */
2170                 if (page_count(page) != 1)
2171                         goto out;
2172                 /*
2173                  * We can do it before isolate_lru_page because the
2174                  * page can't be freed from under us. NOTE: PG_lock
2175                  * is needed to serialize against split_huge_page
2176                  * when invoked from the VM.
2177                  */
2178                 if (!trylock_page(page))
2179                         goto out;
2180                 /*
2181                  * Isolate the page to avoid collapsing a hugepage
2182                  * currently in use by the VM.
2183                  */
2184                 if (isolate_lru_page(page)) {
2185                         unlock_page(page);
2186                         goto out;
2187                 }
2188                 /* 0 stands for page_is_file_cache(page) == false */
2189                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2190                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2191                 VM_BUG_ON_PAGE(PageLRU(page), page);
2192
2193                 /* If no mapped pte is young, don't collapse the page */
2194                 if (pte_young(pteval) || PageReferenced(page) ||
2195                     mmu_notifier_test_young(vma->vm_mm, address))
2196                         referenced = 1;
2197         }
2198         if (likely(referenced))
2199                 return 1;
2200 out:
2201         release_pte_pages(pte, _pte);
2202         return 0;
2203 }
2204
2205 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2206                                       struct vm_area_struct *vma,
2207                                       unsigned long address,
2208                                       spinlock_t *ptl)
2209 {
2210         pte_t *_pte;
2211         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2212                 pte_t pteval = *_pte;
2213                 struct page *src_page;
2214
2215                 if (pte_none(pteval)) {
2216                         clear_user_highpage(page, address);
2217                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2218                 } else {
2219                         src_page = pte_page(pteval);
2220                         copy_user_highpage(page, src_page, address, vma);
2221                         VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2222                         release_pte_page(src_page);
2223                         /*
2224                          * ptl mostly unnecessary, but preempt has to
2225                          * be disabled to update the per-cpu stats
2226                          * inside page_remove_rmap().
2227                          */
2228                         spin_lock(ptl);
2229                         /*
2230                          * paravirt calls inside pte_clear here are
2231                          * superfluous.
2232                          */
2233                         pte_clear(vma->vm_mm, address, _pte);
2234                         page_remove_rmap(src_page);
2235                         spin_unlock(ptl);
2236                         free_page_and_swap_cache(src_page);
2237                 }
2238
2239                 address += PAGE_SIZE;
2240                 page++;
2241         }
2242 }
2243
2244 static void khugepaged_alloc_sleep(void)
2245 {
2246         wait_event_freezable_timeout(khugepaged_wait, false,
2247                         msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2248 }
2249
2250 static int khugepaged_node_load[MAX_NUMNODES];
2251
2252 static bool khugepaged_scan_abort(int nid)
2253 {
2254         int i;
2255
2256         /*
2257          * If zone_reclaim_mode is disabled, then no extra effort is made to
2258          * allocate memory locally.
2259          */
2260         if (!zone_reclaim_mode)
2261                 return false;
2262
2263         /* If there is a count for this node already, it must be acceptable */
2264         if (khugepaged_node_load[nid])
2265                 return false;
2266
2267         for (i = 0; i < MAX_NUMNODES; i++) {
2268                 if (!khugepaged_node_load[i])
2269                         continue;
2270                 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2271                         return true;
2272         }
2273         return false;
2274 }
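
/*
 * Illustrative example with an invented topology: RECLAIM_DISTANCE is
 * 30 on common configs, and node_distance() might be 10 locally, 20 to
 * a near node and 40 to a far one. If every page seen so far sits on
 * node 0 and the next page is on node 3 with node_distance(3, 0) == 40,
 * khugepaged_scan_abort() returns true: collapsing would build a
 * hugepage whose memory straddles distant nodes.
 */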
2275
2276 #ifdef CONFIG_NUMA
2277 static int khugepaged_find_target_node(void)
2278 {
2279         static int last_khugepaged_target_node = NUMA_NO_NODE;
2280         int nid, target_node = 0, max_value = 0;
2281
2282         /* find first node with max normal pages hit */
2283         for (nid = 0; nid < MAX_NUMNODES; nid++)
2284                 if (khugepaged_node_load[nid] > max_value) {
2285                         max_value = khugepaged_node_load[nid];
2286                         target_node = nid;
2287                 }
2288
2289         /* do some balancing if several nodes have the same hit record */
2290         if (target_node <= last_khugepaged_target_node)
2291                 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2292                                 nid++)
2293                         if (max_value == khugepaged_node_load[nid]) {
2294                                 target_node = nid;
2295                                 break;
2296                         }
2297
2298         last_khugepaged_target_node = target_node;
2299         return target_node;
2300 }
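
/*
 * Worked example of the tie-breaking above (loads invented): with
 * khugepaged_node_load = { 256, 256, 0, 0 } the first loop picks node 0.
 * If the previous target was already node 0, the second loop advances to
 * the next node with an equal load, node 1, so consecutive collapses
 * with tied counts alternate between the tied nodes rather than always
 * landing on the lowest-numbered one.
 */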
2301
2302 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2303 {
2304         if (IS_ERR(*hpage)) {
2305                 if (!*wait)
2306                         return false;
2307
2308                 *wait = false;
2309                 *hpage = NULL;
2310                 khugepaged_alloc_sleep();
2311         } else if (*hpage) {
2312                 put_page(*hpage);
2313                 *hpage = NULL;
2314         }
2315
2316         return true;
2317 }
2318
2319 static struct page
2320 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2321                        struct vm_area_struct *vma, unsigned long address,
2322                        int node)
2323 {
2324         VM_BUG_ON_PAGE(*hpage, *hpage);
2325
2326         /*
2327          * Before allocating the hugepage, release the mmap_sem read lock.
2328          * The allocation can potentially take a long time if it involves
2329          * sync compaction, and we do not need to hold the mmap_sem during
2330          * that. We will recheck the vma after taking it again in write mode.
2331          */
2332         up_read(&mm->mmap_sem);
2333
2334         *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
2335                 khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
2336         if (unlikely(!*hpage)) {
2337                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2338                 *hpage = ERR_PTR(-ENOMEM);
2339                 return NULL;
2340         }
2341
2342         count_vm_event(THP_COLLAPSE_ALLOC);
2343         return *hpage;
2344 }
2345 #else
2346 static int khugepaged_find_target_node(void)
2347 {
2348         return 0;
2349 }
2350
2351 static inline struct page *alloc_hugepage(int defrag)
2352 {
2353         return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
2354                            HPAGE_PMD_ORDER);
2355 }
2356
2357 static struct page *khugepaged_alloc_hugepage(bool *wait)
2358 {
2359         struct page *hpage;
2360
2361         do {
2362                 hpage = alloc_hugepage(khugepaged_defrag());
2363                 if (!hpage) {
2364                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2365                         if (!*wait)
2366                                 return NULL;
2367
2368                         *wait = false;
2369                         khugepaged_alloc_sleep();
2370                 } else
2371                         count_vm_event(THP_COLLAPSE_ALLOC);
2372         } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2373
2374         return hpage;
2375 }
2376
2377 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2378 {
2379         if (!*hpage)
2380                 *hpage = khugepaged_alloc_hugepage(wait);
2381
2382         if (unlikely(!*hpage))
2383                 return false;
2384
2385         return true;
2386 }
2387
2388 static struct page
2389 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2390                        struct vm_area_struct *vma, unsigned long address,
2391                        int node)
2392 {
2393         up_read(&mm->mmap_sem);
2394         VM_BUG_ON(!*hpage);
2395         return  *hpage;
2396 }
2397 #endif
2398
2399 static bool hugepage_vma_check(struct vm_area_struct *vma)
2400 {
2401         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2402             (vma->vm_flags & VM_NOHUGEPAGE))
2403                 return false;
2404
2405         if (!vma->anon_vma || vma->vm_ops)
2406                 return false;
2407         if (is_vma_temporary_stack(vma))
2408                 return false;
2409         VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
2410         return true;
2411 }
2412
2413 static void collapse_huge_page(struct mm_struct *mm,
2414                                    unsigned long address,
2415                                    struct page **hpage,
2416                                    struct vm_area_struct *vma,
2417                                    int node)
2418 {
2419         pmd_t *pmd, _pmd;
2420         pte_t *pte;
2421         pgtable_t pgtable;
2422         struct page *new_page;
2423         spinlock_t *pmd_ptl, *pte_ptl;
2424         int isolated;
2425         unsigned long hstart, hend;
2426         struct mem_cgroup *memcg;
2427         unsigned long mmun_start;       /* For mmu_notifiers */
2428         unsigned long mmun_end;         /* For mmu_notifiers */
2429
2430         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2431
2432         /* khugepaged_alloc_page() releases the mmap_sem read lock. */
2433         new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
2434         if (!new_page)
2435                 return;
2436
2437         if (unlikely(mem_cgroup_try_charge(new_page, mm,
2438                                            GFP_TRANSHUGE, &memcg)))
2439                 return;
2440
2441         /*
2442          * Prevent all access to the pagetables, with the exception of
2443          * gup_fast (handled later by the ptep_clear_flush) and the VM
2444          * (handled by the anon_vma lock + PG_lock).
2445          */
2446         down_write(&mm->mmap_sem);
2447         if (unlikely(khugepaged_test_exit(mm)))
2448                 goto out;
2449
2450         vma = find_vma(mm, address);
2451         if (!vma)
2452                 goto out;
2453         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2454         hend = vma->vm_end & HPAGE_PMD_MASK;
2455         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
2456                 goto out;
2457         if (!hugepage_vma_check(vma))
2458                 goto out;
2459         pmd = mm_find_pmd(mm, address);
2460         if (!pmd)
2461                 goto out;
2462
2463         anon_vma_lock_write(vma->anon_vma);
2464
2465         pte = pte_offset_map(pmd, address);
2466         pte_ptl = pte_lockptr(mm, pmd);
2467
2468         mmun_start = address;
2469         mmun_end   = address + HPAGE_PMD_SIZE;
2470         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2471         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2472         /*
2473          * After this gup_fast can't run anymore. This also removes
2474          * any huge TLB entry from the CPU so we won't allow
2475          * huge and small TLB entries for the same virtual address
2476          * to avoid the risk of CPU bugs in that area.
2477          */
2478         _pmd = pmdp_clear_flush(vma, address, pmd);
2479         spin_unlock(pmd_ptl);
2480         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2481
2482         spin_lock(pte_ptl);
2483         isolated = __collapse_huge_page_isolate(vma, address, pte);
2484         spin_unlock(pte_ptl);
2485
2486         if (unlikely(!isolated)) {
2487                 pte_unmap(pte);
2488                 spin_lock(pmd_ptl);
2489                 BUG_ON(!pmd_none(*pmd));
2490                 /*
2491                  * We can only use set_pmd_at when establishing
2492                  * hugepmds and never for establishing regular pmds that
2493                  * points to regular pagetables. Use pmd_populate for that
2494                  * point to regular pagetables. Use pmd_populate for that.
2495                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2496                 spin_unlock(pmd_ptl);
2497                 anon_vma_unlock_write(vma->anon_vma);
2498                 goto out;
2499         }
2500
2501         /*
2502          * All pages are isolated and locked so anon_vma rmap
2503          * can't run anymore.
2504          */
2505         anon_vma_unlock_write(vma->anon_vma);
2506
2507         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2508         pte_unmap(pte);
2509         __SetPageUptodate(new_page);
2510         pgtable = pmd_pgtable(_pmd);
2511
2512         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2513         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2514
2515         /*
2516          * spin_lock() below is not the equivalent of smp_wmb(), so
2517          * this is needed to avoid the copy_huge_page writes becoming
2518          * visible after the set_pmd_at() write.
2519          */
2520         smp_wmb();
2521
2522         spin_lock(pmd_ptl);
2523         BUG_ON(!pmd_none(*pmd));
2524         page_add_new_anon_rmap(new_page, vma, address);
2525         mem_cgroup_commit_charge(new_page, memcg, false);
2526         lru_cache_add_active_or_unevictable(new_page, vma);
2527         pgtable_trans_huge_deposit(mm, pmd, pgtable);
2528         set_pmd_at(mm, address, pmd, _pmd);
2529         update_mmu_cache_pmd(vma, address, pmd);
2530         spin_unlock(pmd_ptl);
2531
2532         *hpage = NULL;
2533
2534         khugepaged_pages_collapsed++;
2535 out_up_write:
2536         up_write(&mm->mmap_sem);
2537         return;
2538
2539 out:
2540         mem_cgroup_cancel_charge(new_page, memcg);
2541         goto out_up_write;
2542 }
2543
2544 static int khugepaged_scan_pmd(struct mm_struct *mm,
2545                                struct vm_area_struct *vma,
2546                                unsigned long address,
2547                                struct page **hpage)
2548 {
2549         pmd_t *pmd;
2550         pte_t *pte, *_pte;
2551         int ret = 0, referenced = 0, none = 0;
2552         struct page *page;
2553         unsigned long _address;
2554         spinlock_t *ptl;
2555         int node = NUMA_NO_NODE;
2556
2557         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2558
2559         pmd = mm_find_pmd(mm, address);
2560         if (!pmd)
2561                 goto out;
2562
2563         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2564         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2565         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2566              _pte++, _address += PAGE_SIZE) {
2567                 pte_t pteval = *_pte;
2568                 if (pte_none(pteval)) {
2569                         if (++none <= khugepaged_max_ptes_none)
2570                                 continue;
2571                         else
2572                                 goto out_unmap;
2573                 }
2574                 if (!pte_present(pteval) || !pte_write(pteval))
2575                         goto out_unmap;
2576                 page = vm_normal_page(vma, _address, pteval);
2577                 if (unlikely(!page))
2578                         goto out_unmap;
2579                 /*
2580                  * Record which node the original page is from and save this
2581                  * information to khugepaged_node_load[].
2582                  * Khugepaged will allocate the hugepage from the node with
2583                  * the max hit record.
2584                  */
2585                 node = page_to_nid(page);
2586                 if (khugepaged_scan_abort(node))
2587                         goto out_unmap;
2588                 khugepaged_node_load[node]++;
2589                 VM_BUG_ON_PAGE(PageCompound(page), page);
2590                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2591                         goto out_unmap;
2592                 /* cannot use mapcount: can't collapse if there's a gup pin */
2593                 if (page_count(page) != 1)
2594                         goto out_unmap;
2595                 if (pte_young(pteval) || PageReferenced(page) ||
2596                     mmu_notifier_test_young(vma->vm_mm, address))
2597                         referenced = 1;
2598         }
2599         if (referenced)
2600                 ret = 1;
2601 out_unmap:
2602         pte_unmap_unlock(pte, ptl);
2603         if (ret) {
2604                 node = khugepaged_find_target_node();
2605                 /* collapse_huge_page will return with the mmap_sem released */
2606                 collapse_huge_page(mm, address, hpage, vma, node);
2607         }
2608 out:
2609         return ret;
2610 }
2611
2612 static void collect_mm_slot(struct mm_slot *mm_slot)
2613 {
2614         struct mm_struct *mm = mm_slot->mm;
2615
2616         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2617
2618         if (khugepaged_test_exit(mm)) {
2619                 /* free mm_slot */
2620                 hash_del(&mm_slot->hash);
2621                 list_del(&mm_slot->mm_node);
2622
2623                 /*
2624                  * Not strictly needed because the mm exited already.
2625                  *
2626                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2627                  */
2628
2629                 /* khugepaged_mm_lock actually not necessary for the below */
2630                 free_mm_slot(mm_slot);
2631                 mmdrop(mm);
2632         }
2633 }
2634
2635 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2636                                             struct page **hpage)
2637         __releases(&khugepaged_mm_lock)
2638         __acquires(&khugepaged_mm_lock)
2639 {
2640         struct mm_slot *mm_slot;
2641         struct mm_struct *mm;
2642         struct vm_area_struct *vma;
2643         int progress = 0;
2644
2645         VM_BUG_ON(!pages);
2646         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2647
2648         if (khugepaged_scan.mm_slot)
2649                 mm_slot = khugepaged_scan.mm_slot;
2650         else {
2651                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2652                                      struct mm_slot, mm_node);
2653                 khugepaged_scan.address = 0;
2654                 khugepaged_scan.mm_slot = mm_slot;
2655         }
2656         spin_unlock(&khugepaged_mm_lock);
2657
2658         mm = mm_slot->mm;
2659         down_read(&mm->mmap_sem);
2660         if (unlikely(khugepaged_test_exit(mm)))
2661                 vma = NULL;
2662         else
2663                 vma = find_vma(mm, khugepaged_scan.address);
2664
2665         progress++;
2666         for (; vma; vma = vma->vm_next) {
2667                 unsigned long hstart, hend;
2668
2669                 cond_resched();
2670                 if (unlikely(khugepaged_test_exit(mm))) {
2671                         progress++;
2672                         break;
2673                 }
2674                 if (!hugepage_vma_check(vma)) {
2675 skip:
2676                         progress++;
2677                         continue;
2678                 }
2679                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2680                 hend = vma->vm_end & HPAGE_PMD_MASK;
2681                 if (hstart >= hend)
2682                         goto skip;
2683                 if (khugepaged_scan.address > hend)
2684                         goto skip;
2685                 if (khugepaged_scan.address < hstart)
2686                         khugepaged_scan.address = hstart;
2687                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2688
2689                 while (khugepaged_scan.address < hend) {
2690                         int ret;
2691                         cond_resched();
2692                         if (unlikely(khugepaged_test_exit(mm)))
2693                                 goto breakouterloop;
2694
2695                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2696                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2697                                   hend);
2698                         ret = khugepaged_scan_pmd(mm, vma,
2699                                                   khugepaged_scan.address,
2700                                                   hpage);
2701                         /* move to next address */
2702                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2703                         progress += HPAGE_PMD_NR;
2704                         if (ret)
2705                                 /* we released mmap_sem so break loop */
2706                                 goto breakouterloop_mmap_sem;
2707                         if (progress >= pages)
2708                                 goto breakouterloop;
2709                 }
2710         }
2711 breakouterloop:
2712         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2713 breakouterloop_mmap_sem:
2714
2715         spin_lock(&khugepaged_mm_lock);
2716         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2717         /*
2718          * Release the current mm_slot if this mm is about to die, or
2719          * if we scanned all vmas of this mm.
2720          */
2721         if (khugepaged_test_exit(mm) || !vma) {
2722                 /*
2723                  * Make sure that if mm_users is reaching zero while
2724                  * khugepaged runs here, khugepaged_exit will find
2725                  * mm_slot not pointing to the exiting mm.
2726                  */
2727                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2728                         khugepaged_scan.mm_slot = list_entry(
2729                                 mm_slot->mm_node.next,
2730                                 struct mm_slot, mm_node);
2731                         khugepaged_scan.address = 0;
2732                 } else {
2733                         khugepaged_scan.mm_slot = NULL;
2734                         khugepaged_full_scans++;
2735                 }
2736
2737                 collect_mm_slot(mm_slot);
2738         }
2739
2740         return progress;
2741 }
2742
2743 static int khugepaged_has_work(void)
2744 {
2745         return !list_empty(&khugepaged_scan.mm_head) &&
2746                 khugepaged_enabled();
2747 }
2748
2749 static int khugepaged_wait_event(void)
2750 {
2751         return !list_empty(&khugepaged_scan.mm_head) ||
2752                 kthread_should_stop();
2753 }
2754
2755 static void khugepaged_do_scan(void)
2756 {
2757         struct page *hpage = NULL;
2758         unsigned int progress = 0, pass_through_head = 0;
2759         unsigned int pages = khugepaged_pages_to_scan;
2760         bool wait = true;
2761
2762         barrier(); /* write khugepaged_pages_to_scan to local stack */
2763
2764         while (progress < pages) {
2765                 if (!khugepaged_prealloc_page(&hpage, &wait))
2766                         break;
2767
2768                 cond_resched();
2769
2770                 if (unlikely(kthread_should_stop() || freezing(current)))
2771                         break;
2772
2773                 spin_lock(&khugepaged_mm_lock);
2774                 if (!khugepaged_scan.mm_slot)
2775                         pass_through_head++;
2776                 if (khugepaged_has_work() &&
2777                     pass_through_head < 2)
2778                         progress += khugepaged_scan_mm_slot(pages - progress,
2779                                                             &hpage);
2780                 else
2781                         progress = pages;
2782                 spin_unlock(&khugepaged_mm_lock);
2783         }
2784
2785         if (!IS_ERR_OR_NULL(hpage))
2786                 put_page(hpage);
2787 }
2788
2789 static void khugepaged_wait_work(void)
2790 {
2791         try_to_freeze();
2792
2793         if (khugepaged_has_work()) {
2794                 if (!khugepaged_scan_sleep_millisecs)
2795                         return;
2796
2797                 wait_event_freezable_timeout(khugepaged_wait,
2798                                              kthread_should_stop(),
2799                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2800                 return;
2801         }
2802
2803         if (khugepaged_enabled())
2804                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2805 }
2806
2807 static int khugepaged(void *none)
2808 {
2809         struct mm_slot *mm_slot;
2810
2811         set_freezable();
2812         set_user_nice(current, MAX_NICE);
2813
2814         while (!kthread_should_stop()) {
2815                 khugepaged_do_scan();
2816                 khugepaged_wait_work();
2817         }
2818
	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

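/*
 * Split a pmd mapping the huge zero page: clear the pmd, refill the
 * deposited page table with HPAGE_PMD_NR special ptes pointing at the
 * small zero page, and drop the huge zero page reference that the pmd
 * was holding.
 */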
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

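	/*
	 * _pmd is only a stack copy made to point at the withdrawn page
	 * table, so the ptes below can be filled in before the table is
	 * hooked back into the real pmd.
	 */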
	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	put_huge_zero_page();
}

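/*
 * Split the huge pmd mapping @address. The pmd is revalidated under
 * the pmd lock since a racing thread can split or zap it first; the
 * huge zero page case is rewritten in place, while a regular huge page
 * is split through split_huge_page() with an extra reference held.
 */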
void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
again:
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	if (is_huge_zero_pmd(*pmd)) {
		__split_huge_zero_page_pmd(vma, haddr, pmd);
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!page_count(page), page);
	get_page(page);
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	split_huge_page(page);

	put_page(page);

	/*
	 * We don't always have down_write of mmap_sem here: a racing
	 * do_huge_pmd_wp_page() might have copied-on-write to another
	 * huge page before our split_huge_page() got the anon_vma lock.
	 */
	if (unlikely(pmd_trans_huge(*pmd)))
		goto again;
}

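/*
 * mm-based variant: resolve the vma covering @address before
 * splitting.
 */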
void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	BUG_ON(vma == NULL);
	split_huge_page_pmd(vma, address, pmd);
}

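/*
 * Walk pgd/pud/pmd by hand and split any huge pmd found mapping
 * @address; return silently if any level is not present.
 */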
static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

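	/* Callers only pass addresses that are *not* hpage aligned. */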
	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd_mm(mm, address, pmd);
}

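/*
 * Called from the vma_adjust() path with mmap_sem held for writing: a
 * hugepage cannot span a vma boundary, so any huge pmd straddling a
 * boundary that is about to move must be split first. For example,
 * with 2MB hugepages, moving a boundary to 0x201000 forces a split of
 * a huge pmd covering [0x200000, 0x400000) if that range lies inside
 * the vma.
 */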
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating vma->vm_next->vm_start, and the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}