mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/page-isolation.h>
25 #include <linux/jhash.h>
26
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/tlb.h>
30
31 #include <linux/io.h>
32 #include <linux/hugetlb.h>
33 #include <linux/hugetlb_cgroup.h>
34 #include <linux/node.h>
35 #include "internal.h"
36
37 int hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43  * Minimum page order among possible hugepage sizes, set to a proper value
44  * at boot time.
45  */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48 __initdata LIST_HEAD(huge_boot_pages);
49
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54 static bool __initdata parsed_valid_hugepagesz = true;
55
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74         bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76         spin_unlock(&spool->lock);
77
78         /* If no pages are used, and no other handles to the subpool
79          * remain, give up any reservations based on minimum size and
80          * free the subpool */
81         if (free) {
82                 if (spool->min_hpages != -1)
83                         hugetlb_acct_memory(spool->hstate,
84                                                 -spool->min_hpages);
85                 kfree(spool);
86         }
87 }
88
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90                                                 long min_hpages)
91 {
92         struct hugepage_subpool *spool;
93
94         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95         if (!spool)
96                 return NULL;
97
98         spin_lock_init(&spool->lock);
99         spool->count = 1;
100         spool->max_hpages = max_hpages;
101         spool->hstate = h;
102         spool->min_hpages = min_hpages;
103
104         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105                 kfree(spool);
106                 return NULL;
107         }
108         spool->rsv_hpages = min_hpages;
109
110         return spool;
111 }
112
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115         spin_lock(&spool->lock);
116         BUG_ON(!spool->count);
117         spool->count--;
118         unlock_or_release_subpool(spool);
119 }
120
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be maintained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130                                       long delta)
131 {
132         long ret = delta;
133
134         if (!spool)
135                 return ret;
136
137         spin_lock(&spool->lock);
138
139         if (spool->max_hpages != -1) {          /* maximum size accounting */
140                 if ((spool->used_hpages + delta) <= spool->max_hpages)
141                         spool->used_hpages += delta;
142                 else {
143                         ret = -ENOMEM;
144                         goto unlock_ret;
145                 }
146         }
147
148         /* minimum size accounting */
149         if (spool->min_hpages != -1 && spool->rsv_hpages) {
150                 if (delta > spool->rsv_hpages) {
151                         /*
152                          * Asking for more reserves than those already taken on
153                          * behalf of the subpool.  Return the difference.
154                          */
155                         ret = delta - spool->rsv_hpages;
156                         spool->rsv_hpages = 0;
157                 } else {
158                         ret = 0;        /* reserves already accounted for */
159                         spool->rsv_hpages -= delta;
160                 }
161         }
162
163 unlock_ret:
164         spin_unlock(&spool->lock);
165         return ret;
166 }
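/*
 * Illustrative sketch (not from the kernel; all demo_* names are
 * hypothetical): the min/max accounting arithmetic above, reduced to
 * plain userspace C.  As above, -1 means "no limit".
 */
#if 0
#include <assert.h>

struct demo_subpool {
        long max_hpages, used_hpages;   /* maximum size accounting */
        long min_hpages, rsv_hpages;    /* minimum size accounting */
};

/* Returns pages the global pool must provide, or -1 on overcommit. */
static long demo_get_pages(struct demo_subpool *sp, long delta)
{
        long ret = delta;

        if (sp->max_hpages != -1) {
                if (sp->used_hpages + delta > sp->max_hpages)
                        return -1;              /* would exceed maximum */
                sp->used_hpages += delta;
        }
        if (sp->min_hpages != -1 && sp->rsv_hpages) {
                if (delta > sp->rsv_hpages) {
                        ret = delta - sp->rsv_hpages;   /* partially covered */
                        sp->rsv_hpages = 0;
                } else {
                        ret = 0;                /* fully covered by reserves */
                        sp->rsv_hpages -= delta;
                }
        }
        return ret;
}

int main(void)
{
        struct demo_subpool sp = { .max_hpages = -1, .used_hpages = 0,
                                   .min_hpages = 4, .rsv_hpages = 4 };
        assert(demo_get_pages(&sp, 3) == 0);    /* reserves cover it all */
        assert(demo_get_pages(&sp, 3) == 2);    /* 1 reserve left, need 2 */
        return 0;
}
#endif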
167
168 /*
169  * Subpool accounting for freeing and unreserving pages.
170  * Return the number of global page reservations that must be dropped.
171  * The return value may only be different than the passed value (delta)
172  * in the case where a subpool minimum size must be maintained.
173  */
174 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175                                        long delta)
176 {
177         long ret = delta;
178
179         if (!spool)
180                 return delta;
181
182         spin_lock(&spool->lock);
183
184         if (spool->max_hpages != -1)            /* maximum size accounting */
185                 spool->used_hpages -= delta;
186
187         /* minimum size accounting */
188         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189                 if (spool->rsv_hpages + delta <= spool->min_hpages)
190                         ret = 0;
191                 else
192                         ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194                 spool->rsv_hpages += delta;
195                 if (spool->rsv_hpages > spool->min_hpages)
196                         spool->rsv_hpages = spool->min_hpages;
197         }
198
199         /*
200          * If hugetlbfs_put_super couldn't free spool due to an outstanding
201          * quota reference, free it now.
202          */
203         unlock_or_release_subpool(spool);
204
205         return ret;
206 }
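/*
 * Illustrative sketch (not from the kernel; reuses the hypothetical
 * demo_subpool type from the sketch above): the put side of the
 * accounting.  Freed pages refill the reserve up to min_hpages, and
 * only the excess is returned for the caller to drop globally.
 */
#if 0
static long demo_put_pages(struct demo_subpool *sp, long delta)
{
        long ret = delta;

        if (sp->max_hpages != -1)
                sp->used_hpages -= delta;
        if (sp->min_hpages != -1 && sp->used_hpages < sp->min_hpages) {
                if (sp->rsv_hpages + delta <= sp->min_hpages)
                        ret = 0;        /* everything refills the reserve */
                else
                        ret = sp->rsv_hpages + delta - sp->min_hpages;
                sp->rsv_hpages += delta;
                if (sp->rsv_hpages > sp->min_hpages)
                        sp->rsv_hpages = sp->min_hpages;
        }
        return ret;
}
#endif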
207
208 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209 {
210         return HUGETLBFS_SB(inode->i_sb)->spool;
211 }
212
213 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214 {
215         return subpool_inode(file_inode(vma->vm_file));
216 }
217
218 /*
219  * Region tracking -- allows tracking of reservations and instantiated pages
220  *                    across the pages in a mapping.
221  *
222  * The region data structures are embedded into a resv_map and protected
223  * by a resv_map's lock.  The set of regions within the resv_map represent
224  * reservations for huge pages, or huge pages that have already been
225  * instantiated within the map.  The from and to elements are huge page
226  * indices into the associated mapping.  from indicates the starting index
227  * of the region.  to represents the first index past the end of the region.
228  *
229  * For example, a file region structure with from == 0 and to == 4 represents
230  * four huge pages in a mapping.  It is important to note that the to element
231  * represents the first element past the end of the region. This is used in
232  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
233  *
234  * Interval notation of the form [from, to) will be used to indicate that
235  * the endpoint from is inclusive and to is exclusive.
236  */
237 struct file_region {
238         struct list_head link;
239         long from;
240         long to;
241 };
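/*
 * Illustrative sketch (not from the kernel; all demo_* names are
 * hypothetical): the half-open [from, to) arithmetic described above.
 * A region with from == 0 and to == 4 covers indices 0, 1, 2 and 3.
 */
#if 0
#include <assert.h>
#include <stdbool.h>

static long demo_region_pages(long from, long to)
{
        return to - from;               /* 4 - 0 = 4 huge pages */
}

static bool demo_regions_overlap(long f1, long t1, long f2, long t2)
{
        return f1 < t2 && f2 < t1;      /* half-open overlap test */
}

int main(void)
{
        assert(demo_region_pages(0, 4) == 4);
        assert(!demo_regions_overlap(0, 4, 4, 8)); /* adjacent, disjoint */
        return 0;
}
#endif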
242
243 /*
244  * Add the huge page range represented by [f, t) to the reserve
245  * map.  In the normal case, existing regions will be expanded
246  * to accommodate the specified range.  Sufficient regions should
247  * exist for expansion due to the previous call to region_chg
248  * with the same range.  However, it is possible that region_del
249  * could have been called after region_chg and modified the map
250  * in such a way that no region exists to be expanded.  In this
251  * case, pull a region descriptor from the cache associated with
252  * the map and use that for the new range.
253  *
254  * Return the number of new huge pages added to the map.  This
255  * number is greater than or equal to zero.
256  */
257 static long region_add(struct resv_map *resv, long f, long t)
258 {
259         struct list_head *head = &resv->regions;
260         struct file_region *rg, *nrg, *trg;
261         long add = 0;
262
263         spin_lock(&resv->lock);
264         /* Locate the region we are either in or before. */
265         list_for_each_entry(rg, head, link)
266                 if (f <= rg->to)
267                         break;
268
269         /*
270          * If no region exists which can be expanded to include the
271          * specified range, the list must have been modified by an
272  * interleaving call to region_del().  Pull a region descriptor
273          * from the cache and use it for this range.
274          */
275         if (&rg->link == head || t < rg->from) {
276                 VM_BUG_ON(resv->region_cache_count <= 0);
277
278                 resv->region_cache_count--;
279                 nrg = list_first_entry(&resv->region_cache, struct file_region,
280                                         link);
281                 list_del(&nrg->link);
282
283                 nrg->from = f;
284                 nrg->to = t;
285                 list_add(&nrg->link, rg->link.prev);
286
287                 add += t - f;
288                 goto out_locked;
289         }
290
291         /* Round our left edge to the current segment if it encloses us. */
292         if (f > rg->from)
293                 f = rg->from;
294
295         /* Check for and consume any regions we now overlap with. */
296         nrg = rg;
297         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298                 if (&rg->link == head)
299                         break;
300                 if (rg->from > t)
301                         break;
302
303                 /* If this area reaches higher, then extend our area to
304                  * include it completely.  If this is not the first area
305                  * which we intend to reuse, free it. */
306                 if (rg->to > t)
307                         t = rg->to;
308                 if (rg != nrg) {
309                         /* Decrement return value by the deleted range.
310                          * Another range will span this area, so that by the
311                          * end of the routine, add will be >= zero.
312                          */
313                         add -= (rg->to - rg->from);
314                         list_del(&rg->link);
315                         kfree(rg);
316                 }
317         }
318
319         add += (nrg->from - f);         /* Added to beginning of region */
320         nrg->from = f;
321         add += t - nrg->to;             /* Added to end of region */
322         nrg->to = t;
323
324 out_locked:
325         resv->adds_in_progress--;
326         spin_unlock(&resv->lock);
327         VM_BUG_ON(add < 0);
328         return add;
329 }
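/*
 * Illustrative sketch (not from the kernel): the coalescing arithmetic
 * region_add() performs above.  Adding [2, 9) over existing regions
 * [0, 4) and [6, 8) yields the single region [0, 9); the pages newly
 * added number 9 - 6 = 3 (indices 4, 5 and 8).
 */
#if 0
#include <assert.h>

int main(void)
{
        long f = 2, t = 9;
        long r1f = 0, r1t = 4, r2f = 6, r2t = 8;        /* existing regions */
        long before = (r1t - r1f) + (r2t - r2f);        /* 6 pages covered */

        if (f > r1f)
                f = r1f;        /* left edge rounds down to enclosing region */
        if (r2t > t)
                t = r2t;        /* right edge absorbs higher overlaps */

        assert((t - f) - before == 3);
        return 0;
}
#endif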
330
331 /*
332  * Examine the existing reserve map and determine how many
333  * huge pages in the specified range [f, t) are NOT currently
334  * represented.  This routine is called before a subsequent
335  * call to region_add that will actually modify the reserve
336  * map to add the specified range [f, t).  region_chg does
337  * not change the number of huge pages represented by the
338  * map.  However, if the existing regions in the map can not
339  * be expanded to represent the new range, a new file_region
340  * structure is added to the map as a placeholder.  This is
341  * so that the subsequent region_add call will have all the
342  * regions it needs and will not fail.
343  *
344  * Upon entry, region_chg will also examine the cache of region descriptors
345  * associated with the map.  If there are not enough descriptors cached, one
346  * will be allocated for the in progress add operation.
347  *
348  * Returns the number of huge pages that need to be added to the existing
349  * reservation map for the range [f, t).  This number is greater or equal to
350  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
351  * is needed and can not be allocated.
352  */
353 static long region_chg(struct resv_map *resv, long f, long t)
354 {
355         struct list_head *head = &resv->regions;
356         struct file_region *rg, *nrg = NULL;
357         long chg = 0;
358
359 retry:
360         spin_lock(&resv->lock);
361 retry_locked:
362         resv->adds_in_progress++;
363
364         /*
365          * Check for sufficient descriptors in the cache to accommodate
366          * the number of in progress add operations.
367          */
368         if (resv->adds_in_progress > resv->region_cache_count) {
369                 struct file_region *trg;
370
371                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372                 /* Must drop lock to allocate a new descriptor. */
373                 resv->adds_in_progress--;
374                 spin_unlock(&resv->lock);
375
376                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377                 if (!trg) {
378                         kfree(nrg);
379                         return -ENOMEM;
380                 }
381
382                 spin_lock(&resv->lock);
383                 list_add(&trg->link, &resv->region_cache);
384                 resv->region_cache_count++;
385                 goto retry_locked;
386         }
387
388         /* Locate the region we are before or in. */
389         list_for_each_entry(rg, head, link)
390                 if (f <= rg->to)
391                         break;
392
393         /* If we are below the current region, then a new region is required.
394          * Subtle: allocate a new region at the position, but make it zero
395          * size such that we can guarantee to record the reservation. */
396         if (&rg->link == head || t < rg->from) {
397                 if (!nrg) {
398                         resv->adds_in_progress--;
399                         spin_unlock(&resv->lock);
400                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401                         if (!nrg)
402                                 return -ENOMEM;
403
404                         nrg->from = f;
405                         nrg->to   = f;
406                         INIT_LIST_HEAD(&nrg->link);
407                         goto retry;
408                 }
409
410                 list_add(&nrg->link, rg->link.prev);
411                 chg = t - f;
412                 goto out_nrg;
413         }
414
415         /* Round our left edge to the current segment if it encloses us. */
416         if (f > rg->from)
417                 f = rg->from;
418         chg = t - f;
419
420         /* Check for and consume any regions we now overlap with. */
421         list_for_each_entry(rg, rg->link.prev, link) {
422                 if (&rg->link == head)
423                         break;
424                 if (rg->from > t)
425                         goto out;
426
427                 /* We overlap with this area; if it extends further than
428                  * we do, we must extend ourselves.  Account for its
429                  * existing reservation. */
430                 if (rg->to > t) {
431                         chg += rg->to - t;
432                         t = rg->to;
433                 }
434                 chg -= rg->to - rg->from;
435         }
436
437 out:
438         spin_unlock(&resv->lock);
439         /* We already know we raced and no longer need the new region */
440         kfree(nrg);
441         return chg;
442 out_nrg:
443         spin_unlock(&resv->lock);
444         return chg;
445 }
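/*
 * Illustrative sketch (not from the kernel): the two-phase protocol the
 * comments above describe.  region_chg() quotes the cost and ensures a
 * cached descriptor; region_add() or region_abort() later commits or
 * cancels.  demo_global_accounting_fails() is hypothetical.
 */
#if 0
long demo_reserve(struct resv_map *resv, long f, long t)
{
        long chg = region_chg(resv, f, t);      /* phase 1: may allocate */

        if (chg < 0)
                return chg;                     /* -ENOMEM */
        if (demo_global_accounting_fails(chg)) {
                region_abort(resv, f, t);       /* cancel in-progress add */
                return -ENOSPC;
        }
        return region_add(resv, f, t);          /* phase 2: will not fail */
}
#endif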
446
447 /*
448  * Abort the in progress add operation.  The adds_in_progress field
449  * of the resv_map keeps track of the operations in progress between
450  * calls to region_chg and region_add.  Operations are sometimes
451  * aborted after the call to region_chg.  In such cases, region_abort
452  * is called to decrement the adds_in_progress counter.
453  *
454  * NOTE: The range arguments [f, t) are not needed or used in this
455  * routine.  They are kept to make reading the calling code easier as
456  * arguments will match the associated region_chg call.
457  */
458 static void region_abort(struct resv_map *resv, long f, long t)
459 {
460         spin_lock(&resv->lock);
461         VM_BUG_ON(!resv->region_cache_count);
462         resv->adds_in_progress--;
463         spin_unlock(&resv->lock);
464 }
465
466 /*
467  * Delete the specified range [f, t) from the reserve map.  If the
468  * t parameter is LONG_MAX, this indicates that ALL regions after f
469  * should be deleted.  Locate the regions which intersect [f, t)
470  * and either trim, delete or split the existing regions.
471  *
472  * Returns the number of huge pages deleted from the reserve map.
473  * In the normal case, the return value is zero or more.  In the
474  * case where a region must be split, a new region descriptor must
475  * be allocated.  If the allocation fails, -ENOMEM will be returned.
476  * NOTE: If the parameter t == LONG_MAX, then we will never split
477  * a region and possibly return -ENOMEM.  Callers specifying
478  * t == LONG_MAX do not need to check for -ENOMEM error.
479  */
480 static long region_del(struct resv_map *resv, long f, long t)
481 {
482         struct list_head *head = &resv->regions;
483         struct file_region *rg, *trg;
484         struct file_region *nrg = NULL;
485         long del = 0;
486
487 retry:
488         spin_lock(&resv->lock);
489         list_for_each_entry_safe(rg, trg, head, link) {
490                 /*
491                  * Skip regions before the range to be deleted.  file_region
492                  * ranges are normally of the form [from, to).  However, there
493                  * may be a "placeholder" entry in the map which is of the form
494                  * (from, to) with from == to.  Check for placeholder entries
495                  * at the beginning of the range to be deleted.
496                  */
497                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498                         continue;
499
500                 if (rg->from >= t)
501                         break;
502
503                 if (f > rg->from && t < rg->to) { /* Must split region */
504                         /*
505                          * Check for an entry in the cache before dropping
506                          * lock and attempting allocation.
507                          */
508                         if (!nrg &&
509                             resv->region_cache_count > resv->adds_in_progress) {
510                                 nrg = list_first_entry(&resv->region_cache,
511                                                         struct file_region,
512                                                         link);
513                                 list_del(&nrg->link);
514                                 resv->region_cache_count--;
515                         }
516
517                         if (!nrg) {
518                                 spin_unlock(&resv->lock);
519                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520                                 if (!nrg)
521                                         return -ENOMEM;
522                                 goto retry;
523                         }
524
525                         del += t - f;
526
527                         /* New entry for end of split region */
528                         nrg->from = t;
529                         nrg->to = rg->to;
530                         INIT_LIST_HEAD(&nrg->link);
531
532                         /* Original entry is trimmed */
533                         rg->to = f;
534
535                         list_add(&nrg->link, &rg->link);
536                         nrg = NULL;
537                         break;
538                 }
539
540                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541                         del += rg->to - rg->from;
542                         list_del(&rg->link);
543                         kfree(rg);
544                         continue;
545                 }
546
547                 if (f <= rg->from) {    /* Trim beginning of region */
548                         del += t - rg->from;
549                         rg->from = t;
550                 } else {                /* Trim end of region */
551                         del += rg->to - f;
552                         rg->to = f;
553                 }
554         }
555
556         spin_unlock(&resv->lock);
557         kfree(nrg);
558         return del;
559 }
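/*
 * Illustrative sketch (not from the kernel; all demo_* names are
 * hypothetical): the cases region_del() distinguishes above, applied
 * to a single region assumed to overlap [f, t).
 */
#if 0
#include <assert.h>

struct demo_rg { long from, to; };

/* Delete [f, t) from one region.  Returns pages removed; the split
 * case produces a second region in *split. */
static long demo_del(struct demo_rg *rg, long f, long t,
                     struct demo_rg *split)
{
        long del;

        split->from = split->to = 0;
        if (f > rg->from && t < rg->to) {       /* split the region */
                split->from = t;
                split->to = rg->to;
                rg->to = f;
                return t - f;
        }
        if (f <= rg->from && t >= rg->to) {     /* remove it entirely */
                del = rg->to - rg->from;
                rg->from = rg->to = 0;
                return del;
        }
        if (f <= rg->from) {                    /* trim the beginning */
                del = t - rg->from;
                rg->from = t;
                return del;
        }
        del = rg->to - f;                       /* trim the end */
        rg->to = f;
        return del;
}

int main(void)
{
        struct demo_rg rg = { 0, 10 }, split;

        assert(demo_del(&rg, 3, 7, &split) == 4);       /* split case */
        assert(rg.to == 3 && split.from == 7 && split.to == 10);
        return 0;
}
#endif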
560
561 /*
562  * A rare out of memory error was encountered which prevented removal of
563  * the reserve map region for a page.  The huge page itself was freed
564  * and removed from the page cache.  This routine will adjust the subpool
565  * usage count, and the global reserve count if needed.  By incrementing
566  * these counts, the reserve map entry which could not be deleted will
567  * appear as a "reserved" entry instead of simply dangling with incorrect
568  * counts.
569  */
570 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
571 {
572         struct hugepage_subpool *spool = subpool_inode(inode);
573         long rsv_adjust;
574
575         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576         if (restore_reserve && rsv_adjust) {
577                 struct hstate *h = hstate_inode(inode);
578
579                 hugetlb_acct_memory(h, 1);
580         }
581 }
582
583 /*
584  * Count and return the number of huge pages in the reserve map
585  * that intersect with the range [f, t).
586  */
587 static long region_count(struct resv_map *resv, long f, long t)
588 {
589         struct list_head *head = &resv->regions;
590         struct file_region *rg;
591         long chg = 0;
592
593         spin_lock(&resv->lock);
594         /* Locate each segment we overlap with, and count that overlap. */
595         list_for_each_entry(rg, head, link) {
596                 long seg_from;
597                 long seg_to;
598
599                 if (rg->to <= f)
600                         continue;
601                 if (rg->from >= t)
602                         break;
603
604                 seg_from = max(rg->from, f);
605                 seg_to = min(rg->to, t);
606
607                 chg += seg_to - seg_from;
608         }
609         spin_unlock(&resv->lock);
610
611         return chg;
612 }
613
614 /*
615  * Convert the address within this vma to the page offset within
616  * the mapping, in pagecache page units; huge pages here.
617  */
618 static pgoff_t vma_hugecache_offset(struct hstate *h,
619                         struct vm_area_struct *vma, unsigned long address)
620 {
621         return ((address - vma->vm_start) >> huge_page_shift(h)) +
622                         (vma->vm_pgoff >> huge_page_order(h));
623 }
624
625 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626                                      unsigned long address)
627 {
628         return vma_hugecache_offset(hstate_vma(vma), vma, address);
629 }
630 EXPORT_SYMBOL_GPL(linear_hugepage_index);
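/*
 * Illustrative sketch (not from the kernel): the offset arithmetic
 * above for 2 MB huge pages (shift 21, order 9 in 4 KB base pages).
 * A fault 4 MB into a VMA whose vm_pgoff is 1024 base pages (4 MB)
 * lands on huge page index 2 + 2 = 4.  The values are hypothetical.
 */
#if 0
#include <assert.h>

int main(void)
{
        unsigned long huge_shift = 21, huge_order = 21 - 12;
        unsigned long vm_start = 0x40000000UL;
        unsigned long vm_pgoff = 1024;                  /* in base pages */
        unsigned long address = vm_start + (4UL << 20); /* 4 MB into VMA */
        unsigned long idx;

        idx = ((address - vm_start) >> huge_shift) + (vm_pgoff >> huge_order);
        assert(idx == 4);
        return 0;
}
#endif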
631
632 /*
633  * Return the size of the pages allocated when backing a VMA. In the majority
634  * of cases this will be the same size as used by the page table entries.
635  */
636 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
637 {
638         struct hstate *hstate;
639
640         if (!is_vm_hugetlb_page(vma))
641                 return PAGE_SIZE;
642
643         hstate = hstate_vma(vma);
644
645         return 1UL << huge_page_shift(hstate);
646 }
647 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
648
649 /*
650  * Return the page size being used by the MMU to back a VMA. In the majority
651  * of cases, the page size used by the kernel matches the MMU size. On
652  * architectures where it differs, an architecture-specific version of this
653  * function is required.
654  */
655 #ifndef vma_mmu_pagesize
656 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
657 {
658         return vma_kernel_pagesize(vma);
659 }
660 #endif
661
662 /*
663  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
664  * bits of the reservation map pointer, which are always clear due to
665  * alignment.
666  */
667 #define HPAGE_RESV_OWNER    (1UL << 0)
668 #define HPAGE_RESV_UNMAPPED (1UL << 1)
669 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
670
671 /*
672  * These helpers are used to track how many pages are reserved for
673  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
674  * is guaranteed to have its future faults succeed.
675  *
676  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
677  * the reserve counters are updated with the hugetlb_lock held. It is safe
678  * to reset the VMA at fork() time as it is not in use yet and there is no
679  * chance of the global counters getting corrupted as a result of the values.
680  *
681  * The private mapping reservation is represented in a subtly different
682  * manner to a shared mapping.  A shared mapping has a region map associated
683  * with the underlying file; this region map represents the backing file
684  * pages which have ever had a reservation assigned, which persists even
685  * after the page is instantiated.  A private mapping has a region map
686  * associated with the original mmap which is attached to all VMAs which
687  * reference it; this region map represents those offsets which have
688  * consumed a reservation, i.e. where pages have been instantiated.
689  */
690 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
691 {
692         return (unsigned long)vma->vm_private_data;
693 }
694
695 static void set_vma_private_data(struct vm_area_struct *vma,
696                                                         unsigned long value)
697 {
698         vma->vm_private_data = (void *)value;
699 }
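/*
 * Illustrative sketch (not from the kernel; DEMO_* names are
 * hypothetical): stashing flag bits in the low bits of an aligned
 * pointer, as the HPAGE_RESV_* scheme above does with vm_private_data.
 * An allocated resv_map is at least word aligned, so the bottom two
 * bits of its address are always clear.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_RESV_OWNER (1UL << 0)
#define DEMO_RESV_MASK  3UL

int main(void)
{
        long storage;                           /* stands in for a resv_map */
        uintptr_t tagged = (uintptr_t)&storage | DEMO_RESV_OWNER;

        assert(tagged & DEMO_RESV_OWNER);                       /* flag */
        assert((void *)(tagged & ~DEMO_RESV_MASK) == &storage); /* pointer */
        return 0;
}
#endif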
700
701 struct resv_map *resv_map_alloc(void)
702 {
703         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
704         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
705
706         if (!resv_map || !rg) {
707                 kfree(resv_map);
708                 kfree(rg);
709                 return NULL;
710         }
711
712         kref_init(&resv_map->refs);
713         spin_lock_init(&resv_map->lock);
714         INIT_LIST_HEAD(&resv_map->regions);
715
716         resv_map->adds_in_progress = 0;
717
718         INIT_LIST_HEAD(&resv_map->region_cache);
719         list_add(&rg->link, &resv_map->region_cache);
720         resv_map->region_cache_count = 1;
721
722         return resv_map;
723 }
724
725 void resv_map_release(struct kref *ref)
726 {
727         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
728         struct list_head *head = &resv_map->region_cache;
729         struct file_region *rg, *trg;
730
731         /* Clear out any active regions before we release the map. */
732         region_del(resv_map, 0, LONG_MAX);
733
734         /* ... and any entries left in the cache */
735         list_for_each_entry_safe(rg, trg, head, link) {
736                 list_del(&rg->link);
737                 kfree(rg);
738         }
739
740         VM_BUG_ON(resv_map->adds_in_progress);
741
742         kfree(resv_map);
743 }
744
745 static inline struct resv_map *inode_resv_map(struct inode *inode)
746 {
747         return inode->i_mapping->private_data;
748 }
749
750 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
751 {
752         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
753         if (vma->vm_flags & VM_MAYSHARE) {
754                 struct address_space *mapping = vma->vm_file->f_mapping;
755                 struct inode *inode = mapping->host;
756
757                 return inode_resv_map(inode);
758
759         } else {
760                 return (struct resv_map *)(get_vma_private_data(vma) &
761                                                         ~HPAGE_RESV_MASK);
762         }
763 }
764
765 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
766 {
767         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
768         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
769
770         set_vma_private_data(vma, (get_vma_private_data(vma) &
771                                 HPAGE_RESV_MASK) | (unsigned long)map);
772 }
773
774 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
775 {
776         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
777         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
778
779         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
780 }
781
782 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
783 {
784         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785
786         return (get_vma_private_data(vma) & flag) != 0;
787 }
788
789 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
790 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
791 {
792         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
793         if (!(vma->vm_flags & VM_MAYSHARE))
794                 vma->vm_private_data = (void *)0;
795 }
796
797 /* Returns true if the VMA has associated reserve pages */
798 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
799 {
800         if (vma->vm_flags & VM_NORESERVE) {
801                 /*
802                  * This address is already reserved by another process (chg == 0),
803                  * so we should decrement the reserved count. Without decrementing,
804                  * the reserve count remains after releasing the inode, because the
805                  * allocated page will go into the page cache and is regarded as
806                  * coming from the reserved pool in the release step.  Currently, we
807                  * don't have any other solution to deal with this situation
808                  * properly, so add a work-around here.
809                  */
810                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
811                         return true;
812                 else
813                         return false;
814         }
815
816         /* Shared mappings always use reserves */
817         if (vma->vm_flags & VM_MAYSHARE) {
818                 /*
819                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
820                  * be a region map for all pages.  The only situation where
821                  * there is no region map is if a hole was punched via
822  * fallocate.  In this case, there really are no reserves to
823                  * use.  This situation is indicated if chg != 0.
824                  */
825                 if (chg)
826                         return false;
827                 else
828                         return true;
829         }
830
831         /*
832          * Only the process that called mmap() has reserves for
833          * private mappings.
834          */
835         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
836                 /*
837                  * Like the shared case above, a hole punch or truncate
838                  * could have been performed on the private mapping.
839                  * Examine the value of chg to determine if reserves
840                  * actually exist or were previously consumed.
841                  * Very Subtle - The value of chg comes from a previous
842                  * call to vma_needs_reserves().  The reserve map for
843                  * private mappings has different (opposite) semantics
844                  * than that of shared mappings.  vma_needs_reserves()
845                  * has already taken this difference in semantics into
846                  * account.  Therefore, the meaning of chg is the same
847                  * as in the shared case above.  Code could easily be
848                  * combined, but keeping it separate draws attention to
849                  * subtle differences.
850                  */
851                 if (chg)
852                         return false;
853                 else
854                         return true;
855         }
856
857         return false;
858 }
859
860 static void enqueue_huge_page(struct hstate *h, struct page *page)
861 {
862         int nid = page_to_nid(page);
863         list_move(&page->lru, &h->hugepage_freelists[nid]);
864         h->free_huge_pages++;
865         h->free_huge_pages_node[nid]++;
866 }
867
868 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
869 {
870         struct page *page;
871
872         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
873                 if (!is_migrate_isolate_page(page))
874                         break;
875         /*
876          * If a non-isolated free hugepage is not found on the list,
877          * the allocation fails.
878          */
879         if (&h->hugepage_freelists[nid] == &page->lru)
880                 return NULL;
881         list_move(&page->lru, &h->hugepage_activelist);
882         set_page_refcounted(page);
883         h->free_huge_pages--;
884         h->free_huge_pages_node[nid]--;
885         return page;
886 }
887
888 /* Movability of hugepages depends on migration support. */
889 static inline gfp_t htlb_alloc_mask(struct hstate *h)
890 {
891         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
892                 return GFP_HIGHUSER_MOVABLE;
893         else
894                 return GFP_HIGHUSER;
895 }
896
897 static struct page *dequeue_huge_page_vma(struct hstate *h,
898                                 struct vm_area_struct *vma,
899                                 unsigned long address, int avoid_reserve,
900                                 long chg)
901 {
902         struct page *page = NULL;
903         struct mempolicy *mpol;
904         nodemask_t *nodemask;
905         struct zonelist *zonelist;
906         struct zone *zone;
907         struct zoneref *z;
908         unsigned int cpuset_mems_cookie;
909
910         /*
911          * A child process with MAP_PRIVATE mappings created by its parent
912          * has no page reserves. This check ensures that reservations are
913          * not "stolen". The child may still get SIGKILLed.
914          */
915         if (!vma_has_reserves(vma, chg) &&
916                         h->free_huge_pages - h->resv_huge_pages == 0)
917                 goto err;
918
919         /* If reserves cannot be used, ensure enough pages are in the pool */
920         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
921                 goto err;
922
923 retry_cpuset:
924         cpuset_mems_cookie = read_mems_allowed_begin();
925         zonelist = huge_zonelist(vma, address,
926                                         htlb_alloc_mask(h), &mpol, &nodemask);
927
928         for_each_zone_zonelist_nodemask(zone, z, zonelist,
929                                                 MAX_NR_ZONES - 1, nodemask) {
930                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
931                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
932                         if (page) {
933                                 if (avoid_reserve)
934                                         break;
935                                 if (!vma_has_reserves(vma, chg))
936                                         break;
937
938                                 SetPagePrivate(page);
939                                 h->resv_huge_pages--;
940                                 break;
941                         }
942                 }
943         }
944
945         mpol_cond_put(mpol);
946         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
947                 goto retry_cpuset;
948         return page;
949
950 err:
951         return NULL;
952 }
953
954 /*
955  * common helper functions for hstate_next_node_to_{alloc|free}.
956  * We may have allocated or freed a huge page based on a different
957  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
958  * be outside of *nodes_allowed.  Ensure that we use an allowed
959  * node for alloc or free.
960  */
961 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
962 {
963         nid = next_node_in(nid, *nodes_allowed);
964         VM_BUG_ON(nid >= MAX_NUMNODES);
965
966         return nid;
967 }
968
969 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
970 {
971         if (!node_isset(nid, *nodes_allowed))
972                 nid = next_node_allowed(nid, nodes_allowed);
973         return nid;
974 }
975
976 /*
977  * Returns the previously saved node ["this node"] from which to
978  * allocate a persistent huge page for the pool, and advances the
979  * next node from which to allocate, handling wrap at the end of the node
980  * mask.
981  */
982 static int hstate_next_node_to_alloc(struct hstate *h,
983                                         nodemask_t *nodes_allowed)
984 {
985         int nid;
986
987         VM_BUG_ON(!nodes_allowed);
988
989         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
990         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
991
992         return nid;
993 }
994
995 /*
996  * helper for free_pool_huge_page() - return the previously saved
997  * node ["this node"] from which to free a huge page.  Advance the
998  * next node id whether or not we find a free huge page to free so
999  * that the next attempt to free addresses the next node.
1000  */
1001 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1002 {
1003         int nid;
1004
1005         VM_BUG_ON(!nodes_allowed);
1006
1007         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1008         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1009
1010         return nid;
1011 }
1012
1013 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1014         for (nr_nodes = nodes_weight(*mask);                            \
1015                 nr_nodes > 0 &&                                         \
1016                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1017                 nr_nodes--)
1018
1019 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1020         for (nr_nodes = nodes_weight(*mask);                            \
1021                 nr_nodes > 0 &&                                         \
1022                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1023                 nr_nodes--)
1024
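/*
 * Illustrative sketch (not from the kernel; all demo_* names are
 * hypothetical): the wrap-around round-robin walk the macros above
 * perform.  Each call returns the saved "next" node and advances it
 * within the allowed set, so nodes_weight() iterations visit every
 * allowed node exactly once.  This sketch assumes the saved node is
 * already in the allowed set; the real helpers revalidate it first.
 */
#if 0
#include <assert.h>

static int demo_next_node(int *next, const int *allowed, int nr_allowed)
{
        int i, nid = *next;

        for (i = 0; i < nr_allowed; i++)
                if (allowed[i] == nid)
                        break;
        *next = allowed[(i + 1) % nr_allowed];  /* advance with wrap */
        return nid;
}

int main(void)
{
        int allowed[] = { 0, 2, 3 };            /* stand-in for a nodemask */
        int next = 2;

        assert(demo_next_node(&next, allowed, 3) == 2);
        assert(demo_next_node(&next, allowed, 3) == 3);
        assert(demo_next_node(&next, allowed, 3) == 0); /* wrapped */
        return 0;
}
#endif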
1025 #if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
1026 static void destroy_compound_gigantic_page(struct page *page,
1027                                         unsigned int order)
1028 {
1029         int i;
1030         int nr_pages = 1 << order;
1031         struct page *p = page + 1;
1032
1033         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1034                 clear_compound_head(p);
1035                 set_page_refcounted(p);
1036         }
1037
1038         set_compound_order(page, 0);
1039         __ClearPageHead(page);
1040 }
1041
1042 static void free_gigantic_page(struct page *page, unsigned int order)
1043 {
1044         free_contig_range(page_to_pfn(page), 1 << order);
1045 }
1046
1047 static int __alloc_gigantic_page(unsigned long start_pfn,
1048                                 unsigned long nr_pages)
1049 {
1050         unsigned long end_pfn = start_pfn + nr_pages;
1051         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1052 }
1053
1054 static bool pfn_range_valid_gigantic(struct zone *z,
1055                         unsigned long start_pfn, unsigned long nr_pages)
1056 {
1057         unsigned long i, end_pfn = start_pfn + nr_pages;
1058         struct page *page;
1059
1060         for (i = start_pfn; i < end_pfn; i++) {
1061                 if (!pfn_valid(i))
1062                         return false;
1063
1064                 page = pfn_to_page(i);
1065
1066                 if (page_zone(page) != z)
1067                         return false;
1068
1069                 if (PageReserved(page))
1070                         return false;
1071
1072                 if (page_count(page) > 0)
1073                         return false;
1074
1075                 if (PageHuge(page))
1076                         return false;
1077         }
1078
1079         return true;
1080 }
1081
1082 static bool zone_spans_last_pfn(const struct zone *zone,
1083                         unsigned long start_pfn, unsigned long nr_pages)
1084 {
1085         unsigned long last_pfn = start_pfn + nr_pages - 1;
1086         return zone_spans_pfn(zone, last_pfn);
1087 }
1088
1089 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1090 {
1091         unsigned long nr_pages = 1 << order;
1092         unsigned long ret, pfn, flags;
1093         struct zone *z;
1094
1095         z = NODE_DATA(nid)->node_zones;
1096         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1097                 spin_lock_irqsave(&z->lock, flags);
1098
1099                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1100                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1101                         if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1102                                 /*
1103                                  * We release the zone lock here because
1104                                  * alloc_contig_range() will also lock the zone
1105                                  * at some point. If there's an allocation
1106                                  * spinning on this lock, it may win the race
1107                                  * and cause alloc_contig_range() to fail...
1108                                  */
1109                                 spin_unlock_irqrestore(&z->lock, flags);
1110                                 ret = __alloc_gigantic_page(pfn, nr_pages);
1111                                 if (!ret)
1112                                         return pfn_to_page(pfn);
1113                                 spin_lock_irqsave(&z->lock, flags);
1114                         }
1115                         pfn += nr_pages;
1116                 }
1117
1118                 spin_unlock_irqrestore(&z->lock, flags);
1119         }
1120
1121         return NULL;
1122 }
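/*
 * Illustrative sketch (not from the kernel; the values are
 * hypothetical): the aligned-window scan above only ever probes
 * windows whose start pfn is a multiple of the gigantic page size,
 * stepping a whole window at a time.
 */
#if 0
#include <assert.h>

int main(void)
{
        unsigned long zone_start_pfn = 1000;
        unsigned long nr_pages = 512;           /* 2 MB of 4 KB pages */
        unsigned long pfn = (zone_start_pfn + nr_pages - 1) & ~(nr_pages - 1);

        assert(pfn == 1024);            /* ALIGN() rounds 1000 up to 1024 */
        assert(pfn + nr_pages == 1536); /* next candidate window */
        return 0;
}
#endif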
1123
1124 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1125 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1126
1127 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1128 {
1129         struct page *page;
1130
1131         page = alloc_gigantic_page(nid, huge_page_order(h));
1132         if (page) {
1133                 prep_compound_gigantic_page(page, huge_page_order(h));
1134                 prep_new_huge_page(h, page, nid);
1135         }
1136
1137         return page;
1138 }
1139
1140 static int alloc_fresh_gigantic_page(struct hstate *h,
1141                                 nodemask_t *nodes_allowed)
1142 {
1143         struct page *page = NULL;
1144         int nr_nodes, node;
1145
1146         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1147                 page = alloc_fresh_gigantic_page_node(h, node);
1148                 if (page)
1149                         return 1;
1150         }
1151
1152         return 0;
1153 }
1154
1155 static inline bool gigantic_page_supported(void) { return true; }
1156 #else
1157 static inline bool gigantic_page_supported(void) { return false; }
1158 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1159 static inline void destroy_compound_gigantic_page(struct page *page,
1160                                                 unsigned int order) { }
1161 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1162                                         nodemask_t *nodes_allowed) { return 0; }
1163 #endif
1164
1165 static void update_and_free_page(struct hstate *h, struct page *page)
1166 {
1167         int i;
1168
1169         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1170                 return;
1171
1172         h->nr_huge_pages--;
1173         h->nr_huge_pages_node[page_to_nid(page)]--;
1174         for (i = 0; i < pages_per_huge_page(h); i++) {
1175                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1176                                 1 << PG_referenced | 1 << PG_dirty |
1177                                 1 << PG_active | 1 << PG_private |
1178                                 1 << PG_writeback);
1179         }
1180         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1181         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1182         set_page_refcounted(page);
1183         if (hstate_is_gigantic(h)) {
1184                 destroy_compound_gigantic_page(page, huge_page_order(h));
1185                 free_gigantic_page(page, huge_page_order(h));
1186         } else {
1187                 __free_pages(page, huge_page_order(h));
1188         }
1189 }
1190
1191 struct hstate *size_to_hstate(unsigned long size)
1192 {
1193         struct hstate *h;
1194
1195         for_each_hstate(h) {
1196                 if (huge_page_size(h) == size)
1197                         return h;
1198         }
1199         return NULL;
1200 }
1201
1202 /*
1203  * Test to determine whether the hugepage is "active/in-use" (i.e. linked
1204  * to hstate->hugepage_activelist).
1205  *
1206  * This function can be called for tail pages, but never returns true for them.
1207  */
1208 bool page_huge_active(struct page *page)
1209 {
1210         VM_BUG_ON_PAGE(!PageHuge(page), page);
1211         return PageHead(page) && PagePrivate(&page[1]);
1212 }
1213
1214 /* never called for tail page */
1215 static void set_page_huge_active(struct page *page)
1216 {
1217         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1218         SetPagePrivate(&page[1]);
1219 }
1220
1221 static void clear_page_huge_active(struct page *page)
1222 {
1223         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1224         ClearPagePrivate(&page[1]);
1225 }
1226
1227 void free_huge_page(struct page *page)
1228 {
1229         /*
1230          * Can't pass hstate in here because it is called from the
1231          * compound page destructor.
1232          */
1233         struct hstate *h = page_hstate(page);
1234         int nid = page_to_nid(page);
1235         struct hugepage_subpool *spool =
1236                 (struct hugepage_subpool *)page_private(page);
1237         bool restore_reserve;
1238
1239         set_page_private(page, 0);
1240         page->mapping = NULL;
1241         VM_BUG_ON_PAGE(page_count(page), page);
1242         VM_BUG_ON_PAGE(page_mapcount(page), page);
1243         restore_reserve = PagePrivate(page);
1244         ClearPagePrivate(page);
1245
1246         /*
1247          * A return code of zero implies that the subpool will be under its
1248  * minimum size if the reservation is not restored after the page is freed.
1249          * Therefore, force restore_reserve operation.
1250          */
1251         if (hugepage_subpool_put_pages(spool, 1) == 0)
1252                 restore_reserve = true;
1253
1254         spin_lock(&hugetlb_lock);
1255         clear_page_huge_active(page);
1256         hugetlb_cgroup_uncharge_page(hstate_index(h),
1257                                      pages_per_huge_page(h), page);
1258         if (restore_reserve)
1259                 h->resv_huge_pages++;
1260
1261         if (h->surplus_huge_pages_node[nid]) {
1262                 /* remove the page from active list */
1263                 list_del(&page->lru);
1264                 update_and_free_page(h, page);
1265                 h->surplus_huge_pages--;
1266                 h->surplus_huge_pages_node[nid]--;
1267         } else {
1268                 arch_clear_hugepage_flags(page);
1269                 enqueue_huge_page(h, page);
1270         }
1271         spin_unlock(&hugetlb_lock);
1272 }
1273
1274 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1275 {
1276         INIT_LIST_HEAD(&page->lru);
1277         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1278         spin_lock(&hugetlb_lock);
1279         set_hugetlb_cgroup(page, NULL);
1280         h->nr_huge_pages++;
1281         h->nr_huge_pages_node[nid]++;
1282         spin_unlock(&hugetlb_lock);
1283         put_page(page); /* free it into the hugepage allocator */
1284 }
1285
1286 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1287 {
1288         int i;
1289         int nr_pages = 1 << order;
1290         struct page *p = page + 1;
1291
1292         /* we rely on prep_new_huge_page to set the destructor */
1293         set_compound_order(page, order);
1294         __ClearPageReserved(page);
1295         __SetPageHead(page);
1296         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1297                 /*
1298                  * For gigantic hugepages allocated through bootmem at
1299                  * boot, it's safer to be consistent with the not-gigantic
1300                  * hugepages and clear the PG_reserved bit from all tail pages
1301  * too.  Otherwise drivers using get_user_pages() to access tail
1302                  * pages may get the reference counting wrong if they see
1303                  * PG_reserved set on a tail page (despite the head page not
1304                  * having PG_reserved set).  Enforcing this consistency between
1305                  * head and tail pages allows drivers to optimize away a check
1306  * on the head page when they need to know if put_page() is needed
1307                  * after get_user_pages().
1308                  */
1309                 __ClearPageReserved(p);
1310                 set_page_count(p, 0);
1311                 set_compound_head(p, page);
1312         }
1313         atomic_set(compound_mapcount_ptr(page), -1);
1314 }
1315
1316 /*
1317  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1318  * transparent huge pages.  See the PageTransHuge() documentation for more
1319  * details.
1320  */
1321 int PageHuge(struct page *page)
1322 {
1323         if (!PageCompound(page))
1324                 return 0;
1325
1326         page = compound_head(page);
1327         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1328 }
1329 EXPORT_SYMBOL_GPL(PageHuge);
1330
1331 /*
1332  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1333  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1334  */
1335 int PageHeadHuge(struct page *page_head)
1336 {
1337         if (!PageHead(page_head))
1338                 return 0;
1339
1340         return get_compound_page_dtor(page_head) == free_huge_page;
1341 }
1342
1343 pgoff_t __basepage_index(struct page *page)
1344 {
1345         struct page *page_head = compound_head(page);
1346         pgoff_t index = page_index(page_head);
1347         unsigned long compound_idx;
1348
1349         if (!PageHuge(page_head))
1350                 return page_index(page);
1351
1352         if (compound_order(page_head) >= MAX_ORDER)
1353                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1354         else
1355                 compound_idx = page - page_head;
1356
1357         return (index << compound_order(page_head)) + compound_idx;
1358 }
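/*
 * Illustrative sketch (not from the kernel; the values are
 * hypothetical): the index arithmetic above for a 2 MB compound page
 * (order 9).  With the head at file index 5, tail number 12 maps to
 * base-page index (5 << 9) + 12.  The pfn-based branch above exists
 * because mem_map is only guaranteed to be virtually contiguous within
 * a MAX_ORDER block, so tail pointers cannot simply be subtracted for
 * gigantic pages.
 */
#if 0
#include <assert.h>

int main(void)
{
        unsigned long head_index = 5, order = 9, compound_idx = 12;

        assert(((head_index << order) + compound_idx) == 2572);
        return 0;
}
#endif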
1359
1360 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1361 {
1362         struct page *page;
1363
1364         page = __alloc_pages_node(nid,
1365                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1366                                                 __GFP_REPEAT|__GFP_NOWARN,
1367                 huge_page_order(h));
1368         if (page) {
1369                 prep_new_huge_page(h, page, nid);
1370         }
1371
1372         return page;
1373 }
1374
1375 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1376 {
1377         struct page *page;
1378         int nr_nodes, node;
1379         int ret = 0;
1380
1381         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1382                 page = alloc_fresh_huge_page_node(h, node);
1383                 if (page) {
1384                         ret = 1;
1385                         break;
1386                 }
1387         }
1388
1389         if (ret)
1390                 count_vm_event(HTLB_BUDDY_PGALLOC);
1391         else
1392                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1393
1394         return ret;
1395 }
1396
1397 /*
1398  * Free a huge page from the pool, taking it from the next node to free.
1399  * Attempt to keep persistent huge pages more or less
1400  * balanced over allowed nodes.
1401  * Called with hugetlb_lock locked.
1402  */
1403 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1404                                                          bool acct_surplus)
1405 {
1406         int nr_nodes, node;
1407         int ret = 0;
1408
1409         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1410                 /*
1411                  * If we're returning unused surplus pages, only examine
1412                  * nodes with surplus pages.
1413                  */
1414                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1415                     !list_empty(&h->hugepage_freelists[node])) {
1416                         struct page *page =
1417                                 list_entry(h->hugepage_freelists[node].next,
1418                                           struct page, lru);
1419                         list_del(&page->lru);
1420                         h->free_huge_pages--;
1421                         h->free_huge_pages_node[node]--;
1422                         if (acct_surplus) {
1423                                 h->surplus_huge_pages--;
1424                                 h->surplus_huge_pages_node[node]--;
1425                         }
1426                         update_and_free_page(h, page);
1427                         ret = 1;
1428                         break;
1429                 }
1430         }
1431
1432         return ret;
1433 }
1434
1435 /*
1436  * Dissolve a given free hugepage into free buddy pages. This function does
1437  * nothing for in-use (including surplus) hugepages.
1438  */
1439 static void dissolve_free_huge_page(struct page *page)
1440 {
1441         spin_lock(&hugetlb_lock);
1442         if (PageHuge(page) && !page_count(page)) {
1443                 struct hstate *h = page_hstate(page);
1444                 int nid = page_to_nid(page);
1445                 list_del(&page->lru);
1446                 h->free_huge_pages--;
1447                 h->free_huge_pages_node[nid]--;
1448                 update_and_free_page(h, page);
1449         }
1450         spin_unlock(&hugetlb_lock);
1451 }
1452
1453 /*
1454  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1455  * make specified memory blocks removable from the system.
1456  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1457  */
1458 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1459 {
1460         unsigned long pfn;
1461
1462         if (!hugepages_supported())
1463                 return;
1464
1465         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1466         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1467                 dissolve_free_huge_page(pfn_to_page(pfn));
1468 }
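
/*
 * Illustrative sketch (not part of the original file): a memory-hotplug
 * path offlining a block of memory is expected to call this over the
 * block's pfn range, e.g.:
 *
 *	dissolve_free_huge_pages(start_pfn, end_pfn);
 *
 * where start_pfn and end_pfn describe the range being offlined.
 */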
1469
1470 /*
1471  * There are 3 ways this can get called:
1472  * 1. With vma+addr: we use the VMA's memory policy
1473  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1474  *    page from any node, and let the buddy allocator itself figure
1475  *    it out.
1476  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1477  *    strictly from 'nid'.
1478  */
1479 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1480                 struct vm_area_struct *vma, unsigned long addr, int nid)
1481 {
1482         int order = huge_page_order(h);
1483         gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1484         unsigned int cpuset_mems_cookie;
1485
1486         /*
1487          * We need a VMA to get a memory policy.  If we do not
1488          * have one, we use the 'nid' argument.
1489          *
1490          * The mempolicy stuff below has some non-inlined bits
1491          * and calls ->vm_ops.  That makes it hard to optimize at
1492          * compile-time, even when NUMA is off and it does
1493          * nothing.  This helps the compiler optimize it out.
1494          */
1495         if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1496                 /*
1497                  * If a specific node is requested, make sure to
1498                  * get memory from there, but only when a node
1499                  * is explicitly specified.
1500                  */
1501                 if (nid != NUMA_NO_NODE)
1502                         gfp |= __GFP_THISNODE;
1503                 /*
1504                  * Make sure to call something that can handle
1505                  * nid=NUMA_NO_NODE
1506                  */
1507                 return alloc_pages_node(nid, gfp, order);
1508         }
1509
1510         /*
1511          * OK, so we have a VMA.  Fetch the mempolicy and try to
1512          * allocate a huge page with it.  We will only reach this
1513          * when CONFIG_NUMA=y.
1514          */
1515         do {
1516                 struct page *page;
1517                 struct mempolicy *mpol;
1518                 struct zonelist *zl;
1519                 nodemask_t *nodemask;
1520
1521                 cpuset_mems_cookie = read_mems_allowed_begin();
1522                 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1523                 mpol_cond_put(mpol);
1524                 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1525                 if (page)
1526                         return page;
1527         } while (read_mems_allowed_retry(cpuset_mems_cookie));
1528
1529         return NULL;
1530 }
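
/*
 * For illustration, the three calling conventions described above look
 * like this (hypothetical call sites):
 *
 *	(1) __hugetlb_alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
 *	(2) __hugetlb_alloc_buddy_huge_page(h, NULL, -1, NUMA_NO_NODE);
 *	(3) __hugetlb_alloc_buddy_huge_page(h, NULL, -1, nid);
 */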
1531
1532 /*
1533  * There are two ways to allocate a huge page:
1534  * 1. When you have a VMA and an address (like a fault)
1535  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1536  *
1537  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1538  * this case which signifies that the allocation should be done with
1539  * respect for the VMA's memory policy.
1540  *
1541  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1542  * implies that memory policies will not be taken into account.
1543  */
1544 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1545                 struct vm_area_struct *vma, unsigned long addr, int nid)
1546 {
1547         struct page *page;
1548         unsigned int r_nid;
1549
1550         if (hstate_is_gigantic(h))
1551                 return NULL;
1552
1553         /*
1554          * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1555          * This makes sure the caller is picking _one_ of the modes with which
1556          * we can call this function, not both.
1557          */
1558         if (vma || (addr != -1)) {
1559                 VM_WARN_ON_ONCE(addr == -1);
1560                 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1561         }
1562         /*
1563          * Assume we will successfully allocate the surplus page to
1564          * prevent racing processes from causing the surplus to exceed
1565          * overcommit.
1566          *
1567          * This, however, introduces a different race, where a process B
1568          * tries to grow the static hugepage pool while alloc_pages() is
1569          * called by process A. B will only examine the per-node
1570          * counters in determining if surplus huge pages can be
1571          * converted to normal huge pages in adjust_pool_surplus(). A
1572          * won't be able to increment the per-node counter, until the
1573          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1574          * no more huge pages can be converted from surplus to normal
1575          * state (and doesn't try to convert again). Thus, we have a
1576          * case where a surplus huge page exists, the pool is grown, and
1577          * the surplus huge page still exists after, even though it
1578          * should just have been converted to a normal huge page. This
1579          * does not leak memory, though, as the hugepage will be freed
1580          * once it is out of use. It also does not allow the counters to
1581          * go out of whack in adjust_pool_surplus() as we don't modify
1582          * the node values until we've gotten the hugepage and only the
1583          * per-node value is checked there.
1584          */
1585         spin_lock(&hugetlb_lock);
1586         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1587                 spin_unlock(&hugetlb_lock);
1588                 return NULL;
1589         } else {
1590                 h->nr_huge_pages++;
1591                 h->surplus_huge_pages++;
1592         }
1593         spin_unlock(&hugetlb_lock);
1594
1595         page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1596
1597         spin_lock(&hugetlb_lock);
1598         if (page) {
1599                 INIT_LIST_HEAD(&page->lru);
1600                 r_nid = page_to_nid(page);
1601                 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1602                 set_hugetlb_cgroup(page, NULL);
1603                 /*
1604                  * We incremented the global counters already
1605                  */
1606                 h->nr_huge_pages_node[r_nid]++;
1607                 h->surplus_huge_pages_node[r_nid]++;
1608                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1609         } else {
1610                 h->nr_huge_pages--;
1611                 h->surplus_huge_pages--;
1612                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1613         }
1614         spin_unlock(&hugetlb_lock);
1615
1616         return page;
1617 }
1618
1619 /*
1620  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1621  * NUMA_NO_NODE, which means that it may be allocated
1622  * anywhere.
1623  */
1624 static
1625 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1626 {
1627         unsigned long addr = -1;
1628
1629         return __alloc_buddy_huge_page(h, NULL, addr, nid);
1630 }
1631
1632 /*
1633  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1634  */
1635 static
1636 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1637                 struct vm_area_struct *vma, unsigned long addr)
1638 {
1639         return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1640 }
1641
1642 /*
1643  * This allocation function is useful in the context where vma is irrelevant.
1644  * E.g. soft-offlining uses this function because it only cares about the
1645  * physical address of the error page.
1646  */
1647 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1648 {
1649         struct page *page = NULL;
1650
1651         spin_lock(&hugetlb_lock);
1652         if (h->free_huge_pages - h->resv_huge_pages > 0)
1653                 page = dequeue_huge_page_node(h, nid);
1654         spin_unlock(&hugetlb_lock);
1655
1656         if (!page)
1657                 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1658
1659         return page;
1660 }
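
/*
 * Sketch of a typical use (illustrative only; the real call site lives
 * in the memory-failure code): migrating a poisoned huge page to a new
 * page on a chosen node might look like
 *
 *	new = alloc_huge_page_node(page_hstate(compound_head(page)), nid);
 */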
1661
1662 /*
1663  * Increase the hugetlb pool such that it can accommodate a reservation
1664  * of size 'delta'.
1665  */
1666 static int gather_surplus_pages(struct hstate *h, int delta)
1667 {
1668         struct list_head surplus_list;
1669         struct page *page, *tmp;
1670         int ret, i;
1671         int needed, allocated;
1672         bool alloc_ok = true;
1673
1674         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1675         if (needed <= 0) {
1676                 h->resv_huge_pages += delta;
1677                 return 0;
1678         }
1679
1680         allocated = 0;
1681         INIT_LIST_HEAD(&surplus_list);
1682
1683         ret = -ENOMEM;
1684 retry:
1685         spin_unlock(&hugetlb_lock);
1686         for (i = 0; i < needed; i++) {
1687                 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1688                 if (!page) {
1689                         alloc_ok = false;
1690                         break;
1691                 }
1692                 list_add(&page->lru, &surplus_list);
1693         }
1694         allocated += i;
1695
1696         /*
1697          * After retaking hugetlb_lock, we need to recalculate 'needed'
1698          * because either resv_huge_pages or free_huge_pages may have changed.
1699          */
1700         spin_lock(&hugetlb_lock);
1701         needed = (h->resv_huge_pages + delta) -
1702                         (h->free_huge_pages + allocated);
1703         if (needed > 0) {
1704                 if (alloc_ok)
1705                         goto retry;
1706                 /*
1707                  * We were not able to allocate enough pages to
1708                  * satisfy the entire reservation so we free what
1709                  * we've allocated so far.
1710                  */
1711                 goto free;
1712         }
1713         /*
1714          * The surplus_list now contains _at_least_ the number of extra pages
1715          * needed to accommodate the reservation.  Add the appropriate number
1716          * of pages to the hugetlb pool and free the extras back to the buddy
1717          * allocator.  Commit the entire reservation here to prevent another
1718          * process from stealing the pages as they are added to the pool but
1719          * before they are reserved.
1720          */
1721         needed += allocated;
1722         h->resv_huge_pages += delta;
1723         ret = 0;
1724
1725         /* Free the needed pages to the hugetlb pool */
1726         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1727                 if ((--needed) < 0)
1728                         break;
1729                 /*
1730                  * This page is now managed by the hugetlb allocator and has
1731                  * no users -- drop the buddy allocator's reference.
1732                  */
1733                 put_page_testzero(page);
1734                 VM_BUG_ON_PAGE(page_count(page), page);
1735                 enqueue_huge_page(h, page);
1736         }
1737 free:
1738         spin_unlock(&hugetlb_lock);
1739
1740         /* Free unnecessary surplus pages to the buddy allocator */
1741         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1742                 put_page(page);
1743         spin_lock(&hugetlb_lock);
1744
1745         return ret;
1746 }
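
/*
 * Worked example with illustrative numbers: with resv_huge_pages == 10,
 * free_huge_pages == 4 and delta == 2, 'needed' starts at 8.  If all 8
 * surplus pages are allocated but a racing free raises free_huge_pages
 * to 6 in the meantime, the recalculated 'needed' is 10 + 2 - (6 + 8),
 * i.e. -2, so 6 of the new pages are enqueued into the pool and the
 * remaining 2 are handed back to the buddy allocator.
 */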
1747
1748 /*
1749  * When releasing a hugetlb pool reservation, any surplus pages that were
1750  * allocated to satisfy the reservation must be explicitly freed if they were
1751  * never used.
1752  * Called with hugetlb_lock held.
1753  */
1754 static void return_unused_surplus_pages(struct hstate *h,
1755                                         unsigned long unused_resv_pages)
1756 {
1757         unsigned long nr_pages;
1758
1759         /* Uncommit the reservation */
1760         h->resv_huge_pages -= unused_resv_pages;
1761
1762         /* Cannot return gigantic pages currently */
1763         if (hstate_is_gigantic(h))
1764                 return;
1765
1766         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1767
1768         /*
1769          * We want to release as many surplus pages as possible, spread
1770          * evenly across all nodes with memory. Iterate across these nodes
1771          * until we can no longer free unreserved surplus pages. This occurs
1772          * when the nodes with surplus pages have no free pages.
1773          * free_pool_huge_page() will balance the freed pages across the
1774          * on-line nodes with memory and will handle the hstate accounting.
1775          */
1776         while (nr_pages--) {
1777                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1778                         break;
1779                 cond_resched_lock(&hugetlb_lock);
1780         }
1781 }
1782
1783
1784 /*
1785  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1786  * are used by the huge page allocation routines to manage reservations.
1787  *
1788  * vma_needs_reservation is called to determine if the huge page at addr
1789  * within the vma has an associated reservation.  If a reservation is
1790  * needed, the value 1 is returned.  The caller is then responsible for
1791  * managing the global reservation and subpool usage counts.  After
1792  * the huge page has been allocated, vma_commit_reservation is called
1793  * to add the page to the reservation map.  If the page allocation fails,
1794  * the reservation must be ended instead of committed.  vma_end_reservation
1795  * is called in such cases.
1796  *
1797  * In the normal case, vma_commit_reservation returns the same value
1798  * as the preceding vma_needs_reservation call.  The only time this
1799  * is not the case is if a reserve map was changed between calls.  It
1800  * is the responsibility of the caller to notice the difference and
1801  * take appropriate action.
1802  */
1803 enum vma_resv_mode {
1804         VMA_NEEDS_RESV,
1805         VMA_COMMIT_RESV,
1806         VMA_END_RESV,
1807 };
1808 static long __vma_reservation_common(struct hstate *h,
1809                                 struct vm_area_struct *vma, unsigned long addr,
1810                                 enum vma_resv_mode mode)
1811 {
1812         struct resv_map *resv;
1813         pgoff_t idx;
1814         long ret;
1815
1816         resv = vma_resv_map(vma);
1817         if (!resv)
1818                 return 1;
1819
1820         idx = vma_hugecache_offset(h, vma, addr);
1821         switch (mode) {
1822         case VMA_NEEDS_RESV:
1823                 ret = region_chg(resv, idx, idx + 1);
1824                 break;
1825         case VMA_COMMIT_RESV:
1826                 ret = region_add(resv, idx, idx + 1);
1827                 break;
1828         case VMA_END_RESV:
1829                 region_abort(resv, idx, idx + 1);
1830                 ret = 0;
1831                 break;
1832         default:
1833                 BUG();
1834         }
1835
1836         if (vma->vm_flags & VM_MAYSHARE)
1837                 return ret;
1838         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1839                 /*
1840                  * In most cases, reserves always exist for private mappings.
1841          * However, the file associated with the mapping could have been
1842          * hole punched or truncated after reserves were consumed; a
1843          * subsequent fault on such a range will not use reserves.
1844                  * Subtle - The reserve map for private mappings has the
1845                  * opposite meaning than that of shared mappings.  If NO
1846                  * entry is in the reserve map, it means a reservation exists.
1847                  * If an entry exists in the reserve map, it means the
1848                  * reservation has already been consumed.  As a result, the
1849                  * return value of this routine is the opposite of the
1850                  * value returned from reserve map manipulation routines above.
1851                  */
1852                 if (ret)
1853                         return 0;
1854                 else
1855                         return 1;
1856         }
1857         else
1858                 return ret < 0 ? ret : 0;
1859 }
1860
1861 static long vma_needs_reservation(struct hstate *h,
1862                         struct vm_area_struct *vma, unsigned long addr)
1863 {
1864         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1865 }
1866
1867 static long vma_commit_reservation(struct hstate *h,
1868                         struct vm_area_struct *vma, unsigned long addr)
1869 {
1870         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1871 }
1872
1873 static void vma_end_reservation(struct hstate *h,
1874                         struct vm_area_struct *vma, unsigned long addr)
1875 {
1876         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1877 }
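
/*
 * The expected calling pattern, as a sketch:
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return error;
 *	page = ...allocate huge page...;
 *	if (page)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */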
1878
1879 struct page *alloc_huge_page(struct vm_area_struct *vma,
1880                                     unsigned long addr, int avoid_reserve)
1881 {
1882         struct hugepage_subpool *spool = subpool_vma(vma);
1883         struct hstate *h = hstate_vma(vma);
1884         struct page *page;
1885         long map_chg, map_commit;
1886         long gbl_chg;
1887         int ret, idx;
1888         struct hugetlb_cgroup *h_cg;
1889
1890         idx = hstate_index(h);
1891         /*
1892          * Examine the region/reserve map to determine if the process
1893          * has a reservation for the page to be allocated.  A return
1894          * code of zero indicates a reservation exists (no change).
1895          */
1896         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1897         if (map_chg < 0)
1898                 return ERR_PTR(-ENOMEM);
1899
1900         /*
1901          * Processes that did not create the mapping will have no
1902          * reserves as indicated by the region/reserve map. Check
1903          * that the allocation will not exceed the subpool limit.
1904          * Allocations for MAP_NORESERVE mappings also need to be
1905          * checked against any subpool limit.
1906          */
1907         if (map_chg || avoid_reserve) {
1908                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1909                 if (gbl_chg < 0) {
1910                         vma_end_reservation(h, vma, addr);
1911                         return ERR_PTR(-ENOSPC);
1912                 }
1913
1914                 /*
1915                  * Even though there was no reservation in the region/reserve
1916                  * map, there could be reservations associated with the
1917                  * subpool that can be used.  This would be indicated if the
1918                  * return value of hugepage_subpool_get_pages() is zero.
1919                  * However, if avoid_reserve is specified we still avoid even
1920                  * the subpool reservations.
1921                  */
1922                 if (avoid_reserve)
1923                         gbl_chg = 1;
1924         }
1925
1926         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1927         if (ret)
1928                 goto out_subpool_put;
1929
1930         spin_lock(&hugetlb_lock);
1931         /*
1932          * gbl_chg is passed to indicate whether or not a page must be taken
1933          * from the global free pool (global change).  gbl_chg == 0 indicates
1934          * a reservation exists for the allocation.
1935          */
1936         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1937         if (!page) {
1938                 spin_unlock(&hugetlb_lock);
1939                 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1940                 if (!page)
1941                         goto out_uncharge_cgroup;
1942                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1943                         SetPagePrivate(page);
1944                         h->resv_huge_pages--;
1945                 }
1946                 spin_lock(&hugetlb_lock);
1947                 list_move(&page->lru, &h->hugepage_activelist);
1948                 /* Fall through */
1949         }
1950         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1951         spin_unlock(&hugetlb_lock);
1952
1953         set_page_private(page, (unsigned long)spool);
1954
1955         map_commit = vma_commit_reservation(h, vma, addr);
1956         if (unlikely(map_chg > map_commit)) {
1957                 /*
1958                  * The page was added to the reservation map between
1959                  * vma_needs_reservation and vma_commit_reservation.
1960                  * This indicates a race with hugetlb_reserve_pages.
1961                  * Adjust for the subpool count incremented above AND
1962                  * in hugetlb_reserve_pages for the same page.  Also,
1963                  * the reservation count added in hugetlb_reserve_pages
1964                  * no longer applies.
1965                  */
1966                 long rsv_adjust;
1967
1968                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1969                 hugetlb_acct_memory(h, -rsv_adjust);
1970         }
1971         return page;
1972
1973 out_uncharge_cgroup:
1974         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1975 out_subpool_put:
1976         if (map_chg || avoid_reserve)
1977                 hugepage_subpool_put_pages(spool, 1);
1978         vma_end_reservation(h, vma, addr);
1979         return ERR_PTR(-ENOSPC);
1980 }
1981
1982 /*
1983  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1984  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1985  * where no ERR_PTR() value is expected to be returned.
1986  */
1987 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1988                                 unsigned long addr, int avoid_reserve)
1989 {
1990         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1991         if (IS_ERR(page))
1992                 page = NULL;
1993         return page;
1994 }
1995
1996 int __weak alloc_bootmem_huge_page(struct hstate *h)
1997 {
1998         struct huge_bootmem_page *m;
1999         int nr_nodes, node;
2000
2001         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2002                 void *addr;
2003
2004                 addr = memblock_virt_alloc_try_nid_nopanic(
2005                                 huge_page_size(h), huge_page_size(h),
2006                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2007                 if (addr) {
2008                         /*
2009                          * Use the beginning of the huge page to store the
2010                          * huge_bootmem_page struct (until gather_bootmem
2011                          * puts them into the mem_map).
2012                          */
2013                         m = addr;
2014                         goto found;
2015                 }
2016         }
2017         return 0;
2018
2019 found:
2020         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2021         /* Put them into a private list first because mem_map is not up yet */
2022         list_add(&m->list, &huge_boot_pages);
2023         m->hstate = h;
2024         return 1;
2025 }
2026
2027 static void __init prep_compound_huge_page(struct page *page,
2028                 unsigned int order)
2029 {
2030         if (unlikely(order > (MAX_ORDER - 1)))
2031                 prep_compound_gigantic_page(page, order);
2032         else
2033                 prep_compound_page(page, order);
2034 }
2035
2036 /* Put bootmem huge pages into the standard lists after mem_map is up */
2037 static void __init gather_bootmem_prealloc(void)
2038 {
2039         struct huge_bootmem_page *m;
2040
2041         list_for_each_entry(m, &huge_boot_pages, list) {
2042                 struct hstate *h = m->hstate;
2043                 struct page *page;
2044
2045 #ifdef CONFIG_HIGHMEM
2046                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2047                 memblock_free_late(__pa(m),
2048                                    sizeof(struct huge_bootmem_page));
2049 #else
2050                 page = virt_to_page(m);
2051 #endif
2052                 WARN_ON(page_count(page) != 1);
2053                 prep_compound_huge_page(page, h->order);
2054                 WARN_ON(PageReserved(page));
2055                 prep_new_huge_page(h, page, page_to_nid(page));
2056                 /*
2057                  * If we had gigantic hugepages allocated at boot time, we need
2058                  * to restore the 'stolen' pages to totalram_pages in order to
2059                  * fix confusing memory reports from free(1) and another
2060                  * side-effects, like CommitLimit going negative.
2061                  */
2062                 if (hstate_is_gigantic(h))
2063                         adjust_managed_page_count(page, 1 << h->order);
2064         }
2065 }
2066
2067 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2068 {
2069         unsigned long i;
2070
2071         for (i = 0; i < h->max_huge_pages; ++i) {
2072                 if (hstate_is_gigantic(h)) {
2073                         if (!alloc_bootmem_huge_page(h))
2074                                 break;
2075                 } else if (!alloc_fresh_huge_page(h,
2076                                          &node_states[N_MEMORY]))
2077                         break;
2078         }
2079         h->max_huge_pages = i;
2080 }
2081
2082 static void __init hugetlb_init_hstates(void)
2083 {
2084         struct hstate *h;
2085
2086         for_each_hstate(h) {
2087                 if (minimum_order > huge_page_order(h))
2088                         minimum_order = huge_page_order(h);
2089
2090                 /* oversize hugepages were init'ed in early boot */
2091                 if (!hstate_is_gigantic(h))
2092                         hugetlb_hstate_alloc_pages(h);
2093         }
2094         VM_BUG_ON(minimum_order == UINT_MAX);
2095 }
2096
2097 static char * __init memfmt(char *buf, unsigned long n)
2098 {
2099         if (n >= (1UL << 30))
2100                 sprintf(buf, "%lu GB", n >> 30);
2101         else if (n >= (1UL << 20))
2102                 sprintf(buf, "%lu MB", n >> 20);
2103         else
2104                 sprintf(buf, "%lu KB", n >> 10);
2105         return buf;
2106 }
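
/*
 * Example outputs (illustrative): memfmt(buf, 2UL << 20) produces
 * "2 MB", memfmt(buf, 1UL << 30) produces "1 GB" and
 * memfmt(buf, 64UL << 10) produces "64 KB".
 */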
2107
2108 static void __init report_hugepages(void)
2109 {
2110         struct hstate *h;
2111
2112         for_each_hstate(h) {
2113                 char buf[32];
2114                 pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
2115                         memfmt(buf, huge_page_size(h)),
2116                         h->free_huge_pages);
2117         }
2118 }
2119
2120 #ifdef CONFIG_HIGHMEM
2121 static void try_to_free_low(struct hstate *h, unsigned long count,
2122                                                 nodemask_t *nodes_allowed)
2123 {
2124         int i;
2125
2126         if (hstate_is_gigantic(h))
2127                 return;
2128
2129         for_each_node_mask(i, *nodes_allowed) {
2130                 struct page *page, *next;
2131                 struct list_head *freel = &h->hugepage_freelists[i];
2132                 list_for_each_entry_safe(page, next, freel, lru) {
2133                         if (count >= h->nr_huge_pages)
2134                                 return;
2135                         if (PageHighMem(page))
2136                                 continue;
2137                         list_del(&page->lru);
2138                         update_and_free_page(h, page);
2139                         h->free_huge_pages--;
2140                         h->free_huge_pages_node[page_to_nid(page)]--;
2141                 }
2142         }
2143 }
2144 #else
2145 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2146                                                 nodemask_t *nodes_allowed)
2147 {
2148 }
2149 #endif
2150
2151 /*
2152  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2153  * balanced by operating on them in a round-robin fashion.
2154  * Returns 1 if an adjustment was made.
2155  */
2156 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2157                                 int delta)
2158 {
2159         int nr_nodes, node;
2160
2161         VM_BUG_ON(delta != -1 && delta != 1);
2162
2163         if (delta < 0) {
2164                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2165                         if (h->surplus_huge_pages_node[node])
2166                                 goto found;
2167                 }
2168         } else {
2169                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2170                         if (h->surplus_huge_pages_node[node] <
2171                                         h->nr_huge_pages_node[node])
2172                                 goto found;
2173                 }
2174         }
2175         return 0;
2176
2177 found:
2178         h->surplus_huge_pages += delta;
2179         h->surplus_huge_pages_node[node] += delta;
2180         return 1;
2181 }
2182
2183 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2184 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2185                                                 nodemask_t *nodes_allowed)
2186 {
2187         unsigned long min_count, ret;
2188
2189         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2190                 return h->max_huge_pages;
2191
2192         /*
2193          * Increase the pool size
2194          * First take pages out of surplus state.  Then make up the
2195          * remaining difference by allocating fresh huge pages.
2196          *
2197          * We might race with __alloc_buddy_huge_page() here and be unable
2198          * to convert a surplus huge page to a normal huge page. That is
2199          * not critical, though, it just means the overall size of the
2200          * pool might be one hugepage larger than it needs to be, but
2201          * within all the constraints specified by the sysctls.
2202          */
2203         spin_lock(&hugetlb_lock);
2204         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2205                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2206                         break;
2207         }
2208
2209         while (count > persistent_huge_pages(h)) {
2210                 /*
2211                  * If this allocation races such that we no longer need the
2212                  * page, free_huge_page will handle it by freeing the page
2213                  * and reducing the surplus.
2214                  */
2215                 spin_unlock(&hugetlb_lock);
2216                 if (hstate_is_gigantic(h))
2217                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2218                 else
2219                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2220                 spin_lock(&hugetlb_lock);
2221                 if (!ret)
2222                         goto out;
2223
2224                 /* Bail for signals. Probably ctrl-c from user */
2225                 if (signal_pending(current))
2226                         goto out;
2227         }
2228
2229         /*
2230          * Decrease the pool size
2231          * First return free pages to the buddy allocator (being careful
2232          * to keep enough around to satisfy reservations).  Then place
2233          * pages into surplus state as needed so the pool will shrink
2234          * to the desired size as pages become free.
2235          *
2236          * By placing pages into the surplus state independent of the
2237          * overcommit value, we are allowing the surplus pool size to
2238          * exceed overcommit. There are few sane options here. Since
2239          * __alloc_buddy_huge_page() is checking the global counter,
2240          * though, we'll note that we're not allowed to exceed surplus
2241          * and won't grow the pool anywhere else. Not until one of the
2242          * sysctls are changed, or the surplus pages go out of use.
2243          */
2244         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2245         min_count = max(count, min_count);
2246         try_to_free_low(h, min_count, nodes_allowed);
2247         while (min_count < persistent_huge_pages(h)) {
2248                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2249                         break;
2250                 cond_resched_lock(&hugetlb_lock);
2251         }
2252         while (count < persistent_huge_pages(h)) {
2253                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2254                         break;
2255         }
2256 out:
2257         ret = persistent_huge_pages(h);
2258         spin_unlock(&hugetlb_lock);
2259         return ret;
2260 }
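
/*
 * From userspace this path is reached through sysfs or sysctl, e.g.
 * (illustrative shell commands):
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	sysctl vm.nr_hugepages=64
 */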
2261
2262 #define HSTATE_ATTR_RO(_name) \
2263         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2264
2265 #define HSTATE_ATTR(_name) \
2266         static struct kobj_attribute _name##_attr = \
2267                 __ATTR(_name, 0644, _name##_show, _name##_store)
2268
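/*
 * For example, HSTATE_ATTR(nr_hugepages) expands to a kobj_attribute
 * named nr_hugepages_attr with mode 0644, wired to nr_hugepages_show()
 * and nr_hugepages_store() below.
 */
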
2269 static struct kobject *hugepages_kobj;
2270 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2271
2272 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2273
2274 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2275 {
2276         int i;
2277
2278         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2279                 if (hstate_kobjs[i] == kobj) {
2280                         if (nidp)
2281                                 *nidp = NUMA_NO_NODE;
2282                         return &hstates[i];
2283                 }
2284
2285         return kobj_to_node_hstate(kobj, nidp);
2286 }
2287
2288 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2289                                         struct kobj_attribute *attr, char *buf)
2290 {
2291         struct hstate *h;
2292         unsigned long nr_huge_pages;
2293         int nid;
2294
2295         h = kobj_to_hstate(kobj, &nid);
2296         if (nid == NUMA_NO_NODE)
2297                 nr_huge_pages = h->nr_huge_pages;
2298         else
2299                 nr_huge_pages = h->nr_huge_pages_node[nid];
2300
2301         return sprintf(buf, "%lu\n", nr_huge_pages);
2302 }
2303
2304 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2305                                            struct hstate *h, int nid,
2306                                            unsigned long count, size_t len)
2307 {
2308         int err;
2309         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2310
2311         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2312                 err = -EINVAL;
2313                 goto out;
2314         }
2315
2316         if (nid == NUMA_NO_NODE) {
2317                 /*
2318                  * global hstate attribute
2319                  */
2320                 if (!(obey_mempolicy &&
2321                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2322                         NODEMASK_FREE(nodes_allowed);
2323                         nodes_allowed = &node_states[N_MEMORY];
2324                 }
2325         } else if (nodes_allowed) {
2326                 /*
2327                  * per node hstate attribute: adjust count to global,
2328                  * but restrict alloc/free to the specified node.
2329                  */
2330                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2331                 init_nodemask_of_node(nodes_allowed, nid);
2332         } else
2333                 nodes_allowed = &node_states[N_MEMORY];
2334
2335         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2336
2337         if (nodes_allowed != &node_states[N_MEMORY])
2338                 NODEMASK_FREE(nodes_allowed);
2339
2340         return len;
2341 out:
2342         NODEMASK_FREE(nodes_allowed);
2343         return err;
2344 }
2345
2346 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2347                                          struct kobject *kobj, const char *buf,
2348                                          size_t len)
2349 {
2350         struct hstate *h;
2351         unsigned long count;
2352         int nid;
2353         int err;
2354
2355         err = kstrtoul(buf, 10, &count);
2356         if (err)
2357                 return err;
2358
2359         h = kobj_to_hstate(kobj, &nid);
2360         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2361 }
2362
2363 static ssize_t nr_hugepages_show(struct kobject *kobj,
2364                                        struct kobj_attribute *attr, char *buf)
2365 {
2366         return nr_hugepages_show_common(kobj, attr, buf);
2367 }
2368
2369 static ssize_t nr_hugepages_store(struct kobject *kobj,
2370                struct kobj_attribute *attr, const char *buf, size_t len)
2371 {
2372         return nr_hugepages_store_common(false, kobj, buf, len);
2373 }
2374 HSTATE_ATTR(nr_hugepages);
2375
2376 #ifdef CONFIG_NUMA
2377
2378 /*
2379  * hstate attribute for optionally mempolicy-based constraint on persistent
2380  * huge page alloc/free.
2381  */
2382 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2383                                        struct kobj_attribute *attr, char *buf)
2384 {
2385         return nr_hugepages_show_common(kobj, attr, buf);
2386 }
2387
2388 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2389                struct kobj_attribute *attr, const char *buf, size_t len)
2390 {
2391         return nr_hugepages_store_common(true, kobj, buf, len);
2392 }
2393 HSTATE_ATTR(nr_hugepages_mempolicy);
2394 #endif
2395
2396
2397 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2398                                         struct kobj_attribute *attr, char *buf)
2399 {
2400         struct hstate *h = kobj_to_hstate(kobj, NULL);
2401         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2402 }
2403
2404 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2405                 struct kobj_attribute *attr, const char *buf, size_t count)
2406 {
2407         int err;
2408         unsigned long input;
2409         struct hstate *h = kobj_to_hstate(kobj, NULL);
2410
2411         if (hstate_is_gigantic(h))
2412                 return -EINVAL;
2413
2414         err = kstrtoul(buf, 10, &input);
2415         if (err)
2416                 return err;
2417
2418         spin_lock(&hugetlb_lock);
2419         h->nr_overcommit_huge_pages = input;
2420         spin_unlock(&hugetlb_lock);
2421
2422         return count;
2423 }
2424 HSTATE_ATTR(nr_overcommit_hugepages);
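
/*
 * Illustrative usage: raising the overcommit limit allows surplus huge
 * pages to be allocated on demand from the buddy allocator, e.g.:
 *
 *	echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 */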
2425
2426 static ssize_t free_hugepages_show(struct kobject *kobj,
2427                                         struct kobj_attribute *attr, char *buf)
2428 {
2429         struct hstate *h;
2430         unsigned long free_huge_pages;
2431         int nid;
2432
2433         h = kobj_to_hstate(kobj, &nid);
2434         if (nid == NUMA_NO_NODE)
2435                 free_huge_pages = h->free_huge_pages;
2436         else
2437                 free_huge_pages = h->free_huge_pages_node[nid];
2438
2439         return sprintf(buf, "%lu\n", free_huge_pages);
2440 }
2441 HSTATE_ATTR_RO(free_hugepages);
2442
2443 static ssize_t resv_hugepages_show(struct kobject *kobj,
2444                                         struct kobj_attribute *attr, char *buf)
2445 {
2446         struct hstate *h = kobj_to_hstate(kobj, NULL);
2447         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2448 }
2449 HSTATE_ATTR_RO(resv_hugepages);
2450
2451 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2452                                         struct kobj_attribute *attr, char *buf)
2453 {
2454         struct hstate *h;
2455         unsigned long surplus_huge_pages;
2456         int nid;
2457
2458         h = kobj_to_hstate(kobj, &nid);
2459         if (nid == NUMA_NO_NODE)
2460                 surplus_huge_pages = h->surplus_huge_pages;
2461         else
2462                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2463
2464         return sprintf(buf, "%lu\n", surplus_huge_pages);
2465 }
2466 HSTATE_ATTR_RO(surplus_hugepages);
2467
2468 static struct attribute *hstate_attrs[] = {
2469         &nr_hugepages_attr.attr,
2470         &nr_overcommit_hugepages_attr.attr,
2471         &free_hugepages_attr.attr,
2472         &resv_hugepages_attr.attr,
2473         &surplus_hugepages_attr.attr,
2474 #ifdef CONFIG_NUMA
2475         &nr_hugepages_mempolicy_attr.attr,
2476 #endif
2477         NULL,
2478 };
2479
2480 static struct attribute_group hstate_attr_group = {
2481         .attrs = hstate_attrs,
2482 };
2483
2484 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2485                                     struct kobject **hstate_kobjs,
2486                                     struct attribute_group *hstate_attr_group)
2487 {
2488         int retval;
2489         int hi = hstate_index(h);
2490
2491         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2492         if (!hstate_kobjs[hi])
2493                 return -ENOMEM;
2494
2495         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2496         if (retval)
2497                 kobject_put(hstate_kobjs[hi]);
2498
2499         return retval;
2500 }
2501
2502 static void __init hugetlb_sysfs_init(void)
2503 {
2504         struct hstate *h;
2505         int err;
2506
2507         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2508         if (!hugepages_kobj)
2509                 return;
2510
2511         for_each_hstate(h) {
2512                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2513                                          hstate_kobjs, &hstate_attr_group);
2514                 if (err)
2515                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2516         }
2517 }
2518
2519 #ifdef CONFIG_NUMA
2520
2521 /*
2522  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2523  * with node devices in node_devices[] using a parallel array.  The array
2524  * index of a node device or _hstate == node id.
2525  * This is here to avoid any static dependency of the node device driver, in
2526  * the base kernel, on the hugetlb module.
2527  */
2528 struct node_hstate {
2529         struct kobject          *hugepages_kobj;
2530         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2531 };
2532 static struct node_hstate node_hstates[MAX_NUMNODES];
2533
2534 /*
2535  * A subset of global hstate attributes for node devices
2536  */
2537 static struct attribute *per_node_hstate_attrs[] = {
2538         &nr_hugepages_attr.attr,
2539         &free_hugepages_attr.attr,
2540         &surplus_hugepages_attr.attr,
2541         NULL,
2542 };
2543
2544 static struct attribute_group per_node_hstate_attr_group = {
2545         .attrs = per_node_hstate_attrs,
2546 };
2547
2548 /*
2549  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2550  * Returns node id via non-NULL nidp.
2551  */
2552 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2553 {
2554         int nid;
2555
2556         for (nid = 0; nid < nr_node_ids; nid++) {
2557                 struct node_hstate *nhs = &node_hstates[nid];
2558                 int i;
2559                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2560                         if (nhs->hstate_kobjs[i] == kobj) {
2561                                 if (nidp)
2562                                         *nidp = nid;
2563                                 return &hstates[i];
2564                         }
2565         }
2566
2567         BUG();
2568         return NULL;
2569 }
2570
2571 /*
2572  * Unregister hstate attributes from a single node device.
2573  * No-op if no hstate attributes attached.
2574  */
2575 static void hugetlb_unregister_node(struct node *node)
2576 {
2577         struct hstate *h;
2578         struct node_hstate *nhs = &node_hstates[node->dev.id];
2579
2580         if (!nhs->hugepages_kobj)
2581                 return;         /* no hstate attributes */
2582
2583         for_each_hstate(h) {
2584                 int idx = hstate_index(h);
2585                 if (nhs->hstate_kobjs[idx]) {
2586                         kobject_put(nhs->hstate_kobjs[idx]);
2587                         nhs->hstate_kobjs[idx] = NULL;
2588                 }
2589         }
2590
2591         kobject_put(nhs->hugepages_kobj);
2592         nhs->hugepages_kobj = NULL;
2593 }
2594
2595
2596 /*
2597  * Register hstate attributes for a single node device.
2598  * No-op if attributes already registered.
2599  */
2600 static void hugetlb_register_node(struct node *node)
2601 {
2602         struct hstate *h;
2603         struct node_hstate *nhs = &node_hstates[node->dev.id];
2604         int err;
2605
2606         if (nhs->hugepages_kobj)
2607                 return;         /* already allocated */
2608
2609         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2610                                                         &node->dev.kobj);
2611         if (!nhs->hugepages_kobj)
2612                 return;
2613
2614         for_each_hstate(h) {
2615                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2616                                                 nhs->hstate_kobjs,
2617                                                 &per_node_hstate_attr_group);
2618                 if (err) {
2619                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2620                                 h->name, node->dev.id);
2621                         hugetlb_unregister_node(node);
2622                         break;
2623                 }
2624         }
2625 }
2626
2627 /*
2628  * hugetlb init time:  register hstate attributes for all registered node
2629  * devices of nodes that have memory.  All on-line nodes should have
2630  * registered their associated device by this time.
2631  */
2632 static void __init hugetlb_register_all_nodes(void)
2633 {
2634         int nid;
2635
2636         for_each_node_state(nid, N_MEMORY) {
2637                 struct node *node = node_devices[nid];
2638                 if (node->dev.id == nid)
2639                         hugetlb_register_node(node);
2640         }
2641
2642         /*
2643          * Let the node device driver know we're here so it can
2644          * [un]register hstate attributes on node hotplug.
2645          */
2646         register_hugetlbfs_with_node(hugetlb_register_node,
2647                                      hugetlb_unregister_node);
2648 }
2649 #else   /* !CONFIG_NUMA */
2650
2651 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2652 {
2653         BUG();
2654         if (nidp)
2655                 *nidp = -1;
2656         return NULL;
2657 }
2658
2659 static void hugetlb_register_all_nodes(void) { }
2660
2661 #endif
2662
2663 static int __init hugetlb_init(void)
2664 {
2665         int i;
2666
2667         if (!hugepages_supported())
2668                 return 0;
2669
2670         if (!size_to_hstate(default_hstate_size)) {
2671                 default_hstate_size = HPAGE_SIZE;
2672                 if (!size_to_hstate(default_hstate_size))
2673                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2674         }
2675         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2676         if (default_hstate_max_huge_pages) {
2677                 if (!default_hstate.max_huge_pages)
2678                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2679         }
2680
2681         hugetlb_init_hstates();
2682         gather_bootmem_prealloc();
2683         report_hugepages();
2684
2685         hugetlb_sysfs_init();
2686         hugetlb_register_all_nodes();
2687         hugetlb_cgroup_file_init();
2688
2689 #ifdef CONFIG_SMP
2690         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2691 #else
2692         num_fault_mutexes = 1;
2693 #endif
2694         hugetlb_fault_mutex_table =
2695                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), GFP_KERNEL);
2696         BUG_ON(!hugetlb_fault_mutex_table);
2697
2698         for (i = 0; i < num_fault_mutexes; i++)
2699                 mutex_init(&hugetlb_fault_mutex_table[i]);
2700         return 0;
2701 }
2702 subsys_initcall(hugetlb_init);
2703
2704 /* Should be called on processing a hugepagesz=... option */
2705 void __init hugetlb_bad_size(void)
2706 {
2707         parsed_valid_hugepagesz = false;
2708 }
2709
2710 void __init hugetlb_add_hstate(unsigned int order)
2711 {
2712         struct hstate *h;
2713         unsigned long i;
2714
2715         if (size_to_hstate(PAGE_SIZE << order)) {
2716                 pr_warn("hugepagesz= specified twice, ignoring\n");
2717                 return;
2718         }
2719         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2720         BUG_ON(order == 0);
2721         h = &hstates[hugetlb_max_hstate++];
2722         h->order = order;
2723         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2724         h->nr_huge_pages = 0;
2725         h->free_huge_pages = 0;
2726         for (i = 0; i < MAX_NUMNODES; ++i)
2727                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2728         INIT_LIST_HEAD(&h->hugepage_activelist);
2729         h->next_nid_to_alloc = first_memory_node;
2730         h->next_nid_to_free = first_memory_node;
2731         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2732                                         huge_page_size(h)/1024);
2733
2734         parsed_hstate = h;
2735 }
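
/*
 * For example, with 4K base pages an order-9 hstate describes 2 MB huge
 * pages: huge_page_size(h) is 1UL << (9 + 12), h->mask is ~0x1fffffULL,
 * and the hstate is named "hugepages-2048kB".
 */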
2736
2737 static int __init hugetlb_nrpages_setup(char *s)
2738 {
2739         unsigned long *mhp;
2740         static unsigned long *last_mhp;
2741
2742         if (!parsed_valid_hugepagesz) {
2743                 pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
2744                         s);
2745                 parsed_valid_hugepagesz = true;
2746                 return 1;
2747         }
2748         /*
2749          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2750          * so this hugepages= parameter goes to the "default hstate".
2751          */
2752         else if (!hugetlb_max_hstate)
2753                 mhp = &default_hstate_max_huge_pages;
2754         else
2755                 mhp = &parsed_hstate->max_huge_pages;
2756
2757         if (mhp == last_mhp) {
2758                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2759                 return 1;
2760         }
2761
2762         if (sscanf(s, "%lu", mhp) <= 0)
2763                 *mhp = 0;
2764
2765         /*
2766          * Global state is always initialized later in hugetlb_init.
2767          * But we need to allocate >= MAX_ORDER hstates here early to still
2768          * use the bootmem allocator.
2769          */
2770         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2771                 hugetlb_hstate_alloc_pages(parsed_hstate);
2772
2773         last_mhp = mhp;
2774
2775         return 1;
2776 }
2777 __setup("hugepages=", hugetlb_nrpages_setup);
2778
2779 static int __init hugetlb_default_setup(char *s)
2780 {
2781         default_hstate_size = memparse(s, &s);
2782         return 1;
2783 }
2784 __setup("default_hugepagesz=", hugetlb_default_setup);
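
/*
 * Example boot command line combining the options parsed above
 * (illustrative):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 */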
2785
2786 static unsigned int cpuset_mems_nr(unsigned int *array)
2787 {
2788         int node;
2789         unsigned int nr = 0;
2790
2791         for_each_node_mask(node, cpuset_current_mems_allowed)
2792                 nr += array[node];
2793
2794         return nr;
2795 }
2796
2797 #ifdef CONFIG_SYSCTL
2798 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2799                          struct ctl_table *table, int write,
2800                          void __user *buffer, size_t *length, loff_t *ppos)
2801 {
2802         struct hstate *h = &default_hstate;
2803         unsigned long tmp = h->max_huge_pages;
2804         int ret;
2805
2806         if (!hugepages_supported())
2807                 return -EOPNOTSUPP;
2808
2809         table->data = &tmp;
2810         table->maxlen = sizeof(unsigned long);
2811         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2812         if (ret)
2813                 goto out;
2814
2815         if (write)
2816                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2817                                                   NUMA_NO_NODE, tmp, *length);
2818 out:
2819         return ret;
2820 }
2821
2822 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2823                           void __user *buffer, size_t *length, loff_t *ppos)
2824 {
2825
2826         return hugetlb_sysctl_handler_common(false, table, write,
2827                                                         buffer, length, ppos);
2828 }
2829
2830 #ifdef CONFIG_NUMA
2831 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2832                           void __user *buffer, size_t *length, loff_t *ppos)
2833 {
2834         return hugetlb_sysctl_handler_common(true, table, write,
2835                                                         buffer, length, ppos);
2836 }
2837 #endif /* CONFIG_NUMA */
2838
2839 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2840                         void __user *buffer,
2841                         size_t *length, loff_t *ppos)
2842 {
2843         struct hstate *h = &default_hstate;
2844         unsigned long tmp;
2845         int ret;
2846
2847         if (!hugepages_supported())
2848                 return -EOPNOTSUPP;
2849
2850         tmp = h->nr_overcommit_huge_pages;
2851
2852         if (write && hstate_is_gigantic(h))
2853                 return -EINVAL;
2854
2855         table->data = &tmp;
2856         table->maxlen = sizeof(unsigned long);
2857         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2858         if (ret)
2859                 goto out;
2860
2861         if (write) {
2862                 spin_lock(&hugetlb_lock);
2863                 h->nr_overcommit_huge_pages = tmp;
2864                 spin_unlock(&hugetlb_lock);
2865         }
2866 out:
2867         return ret;
2868 }
2869
2870 #endif /* CONFIG_SYSCTL */
2871
2872 void hugetlb_report_meminfo(struct seq_file *m)
2873 {
2874         struct hstate *h = &default_hstate;
2875         if (!hugepages_supported())
2876                 return;
2877         seq_printf(m,
2878                         "HugePages_Total:   %5lu\n"
2879                         "HugePages_Free:    %5lu\n"
2880                         "HugePages_Rsvd:    %5lu\n"
2881                         "HugePages_Surp:    %5lu\n"
2882                         "Hugepagesize:   %8lu kB\n",
2883                         h->nr_huge_pages,
2884                         h->free_huge_pages,
2885                         h->resv_huge_pages,
2886                         h->surplus_huge_pages,
2887                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2888 }
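
/*
 * Example /proc/meminfo fragment produced above (illustrative values):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */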
2889
2890 int hugetlb_report_node_meminfo(int nid, char *buf)
2891 {
2892         struct hstate *h = &default_hstate;
2893         if (!hugepages_supported())
2894                 return 0;
2895         return sprintf(buf,
2896                 "Node %d HugePages_Total: %5u\n"
2897                 "Node %d HugePages_Free:  %5u\n"
2898                 "Node %d HugePages_Surp:  %5u\n",
2899                 nid, h->nr_huge_pages_node[nid],
2900                 nid, h->free_huge_pages_node[nid],
2901                 nid, h->surplus_huge_pages_node[nid]);
2902 }
2903
2904 void hugetlb_show_meminfo(void)
2905 {
2906         struct hstate *h;
2907         int nid;
2908
2909         if (!hugepages_supported())
2910                 return;
2911
2912         for_each_node_state(nid, N_MEMORY)
2913                 for_each_hstate(h)
2914                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2915                                 nid,
2916                                 h->nr_huge_pages_node[nid],
2917                                 h->free_huge_pages_node[nid],
2918                                 h->surplus_huge_pages_node[nid],
2919                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2920 }
2921
2922 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2923 {
2924         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2925                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2926 }
2927
2928 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2929 unsigned long hugetlb_total_pages(void)
2930 {
2931         struct hstate *h;
2932         unsigned long nr_total_pages = 0;
2933
2934         for_each_hstate(h)
2935                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2936         return nr_total_pages;
2937 }
2938
2939 static int hugetlb_acct_memory(struct hstate *h, long delta)
2940 {
2941         int ret = -ENOMEM;
2942
2943         spin_lock(&hugetlb_lock);
2944         /*
2945          * When cpuset is configured, it breaks the strict hugetlb page
2946          * reservation as the accounting is done on a global variable. Such
2947          * a reservation is largely meaningless in the presence of cpusets,
2948          * because it is not checked against page availability for the
2949          * current cpuset. An application can still be OOM-killed by the
2950          * kernel for lack of free hugetlb pages in the cpuset the task
2951          * runs in. Enforcing strict accounting with cpusets is almost
2952          * impossible (or too ugly), because cpusets are too fluid: tasks
2953          * and memory nodes can be moved between cpusets dynamically.
2954          *
2955          * Changing the semantics of shared hugetlb mappings under cpusets
2956          * is undesirable. However, to preserve some of the semantics, we
2957          * fall back to checking against the current free page availability,
2958          * as a best-effort attempt to minimize the impact of the semantic
2959          * change that cpusets introduce.
2960          */
2961         if (delta > 0) {
2962                 if (gather_surplus_pages(h, delta) < 0)
2963                         goto out;
2964
2965                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2966                         return_unused_surplus_pages(h, delta);
2967                         goto out;
2968                 }
2969         }
2970
2971         ret = 0;
2972         if (delta < 0)
2973                 return_unused_surplus_pages(h, (unsigned long) -delta);
2974
2975 out:
2976         spin_unlock(&hugetlb_lock);
2977         return ret;
2978 }
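
/*
 * Usage summary for hugetlb_acct_memory(), derived from its callers in this
 * file: a positive delta reserves pages, gathering surplus pages when the
 * pool is short; a negative delta releases a prior reservation and returns
 * any unused surplus pages.
 */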
2979
2980 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2981 {
2982         struct resv_map *resv = vma_resv_map(vma);
2983
2984         /*
2985          * This new VMA should share its sibling's reservation map if present.
2986          * The VMA will only ever have a valid reservation map pointer where
2987          * it is being copied for another still existing VMA.  As that VMA
2988          * has a reference to the reservation map it cannot disappear until
2989          * after this open call completes.  It is therefore safe to take a
2990          * new reference here without additional locking.
2991          */
2992         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2993                 kref_get(&resv->refs);
2994 }
2995
2996 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2997 {
2998         struct hstate *h = hstate_vma(vma);
2999         struct resv_map *resv = vma_resv_map(vma);
3000         struct hugepage_subpool *spool = subpool_vma(vma);
3001         unsigned long reserve, start, end;
3002         long gbl_reserve;
3003
3004         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3005                 return;
3006
3007         start = vma_hugecache_offset(h, vma, vma->vm_start);
3008         end = vma_hugecache_offset(h, vma, vma->vm_end);
3009
3010         reserve = (end - start) - region_count(resv, start, end);
3011
3012         kref_put(&resv->refs, resv_map_release);
3013
3014         if (reserve) {
3015                 /*
3016                  * Decrement reserve counts.  The global reserve count may be
3017                  * adjusted if the subpool has a minimum size.
3018                  */
3019                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3020                 hugetlb_acct_memory(h, -gbl_reserve);
3021         }
3022 }
3023
3024 /*
3025  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3026  * handle_mm_fault() to try to instantiate regular-sized pages in the
3027  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3028  * this far.
3029  */
3030 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3031 {
3032         BUG();
3033         return 0;
3034 }
3035
3036 const struct vm_operations_struct hugetlb_vm_ops = {
3037         .fault = hugetlb_vm_op_fault,
3038         .open = hugetlb_vm_op_open,
3039         .close = hugetlb_vm_op_close,
3040 };
3041
3042 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3043                                 int writable)
3044 {
3045         pte_t entry;
3046
3047         if (writable) {
3048                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3049                                          vma->vm_page_prot)));
3050         } else {
3051                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3052                                            vma->vm_page_prot));
3053         }
3054         entry = pte_mkyoung(entry);
3055         entry = pte_mkhuge(entry);
3056         entry = arch_make_huge_pte(entry, vma, page, writable);
3057
3058         return entry;
3059 }
3060
3061 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3062                                    unsigned long address, pte_t *ptep)
3063 {
3064         pte_t entry;
3065
3066         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3067         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3068                 update_mmu_cache(vma, address, ptep);
3069 }
3070
3071 static int is_hugetlb_entry_migration(pte_t pte)
3072 {
3073         swp_entry_t swp;
3074
3075         if (huge_pte_none(pte) || pte_present(pte))
3076                 return 0;
3077         swp = pte_to_swp_entry(pte);
3078         if (non_swap_entry(swp) && is_migration_entry(swp))
3079                 return 1;
3080         else
3081                 return 0;
3082 }
3083
3084 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3085 {
3086         swp_entry_t swp;
3087
3088         if (huge_pte_none(pte) || pte_present(pte))
3089                 return 0;
3090         swp = pte_to_swp_entry(pte);
3091         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3092                 return 1;
3093         else
3094                 return 0;
3095 }
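
/*
 * Taken together, the two helpers above classify a non-trivial huge pte for
 * fault handling roughly as follows (a summary of the logic used by
 * hugetlb_fault() below, not new behaviour):
 *
 *	huge_pte_none(pte)		-> no mapping yet, take the no-page path
 *	pte_present(pte)		-> a normally mapped hugepage
 *	is_hugetlb_entry_migration()	-> wait for the migration to finish
 *	is_hugetlb_entry_hwpoisoned()	-> fail the fault with a hwpoison error
 */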
3096
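/*
 * Copy the huge page table entries of a VMA from a parent mm to a child mm,
 * typically at fork() time.  For private (COW) mappings both copies are
 * write-protected so the first write triggers hugetlb_cow().
 */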
3097 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3098                             struct vm_area_struct *vma)
3099 {
3100         pte_t *src_pte, *dst_pte, entry;
3101         struct page *ptepage;
3102         unsigned long addr;
3103         int cow;
3104         struct hstate *h = hstate_vma(vma);
3105         unsigned long sz = huge_page_size(h);
3106         unsigned long mmun_start;       /* For mmu_notifiers */
3107         unsigned long mmun_end;         /* For mmu_notifiers */
3108         int ret = 0;
3109
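        /*
         * COW applies only to private writable mappings: VM_MAYWRITE set
         * and VM_SHARED clear in vm_flags.
         */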
3110         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3111
3112         mmun_start = vma->vm_start;
3113         mmun_end = vma->vm_end;
3114         if (cow)
3115                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3116
3117         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3118                 spinlock_t *src_ptl, *dst_ptl;
3119                 src_pte = huge_pte_offset(src, addr);
3120                 if (!src_pte)
3121                         continue;
3122                 dst_pte = huge_pte_alloc(dst, addr, sz);
3123                 if (!dst_pte) {
3124                         ret = -ENOMEM;
3125                         break;
3126                 }
3127
3128                 /* If the pagetables are shared don't copy or take references */
3129                 if (dst_pte == src_pte)
3130                         continue;
3131
3132                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3133                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3134                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3135                 entry = huge_ptep_get(src_pte);
3136                 if (huge_pte_none(entry)) { /* skip none entry */
3137                         ;
3138                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3139                                     is_hugetlb_entry_hwpoisoned(entry))) {
3140                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3141
3142                         if (is_write_migration_entry(swp_entry) && cow) {
3143                                 /*
3144                                  * COW mappings require pages in both
3145                                  * parent and child to be set to read.
3146                                  */
3147                                 make_migration_entry_read(&swp_entry);
3148                                 entry = swp_entry_to_pte(swp_entry);
3149                                 set_huge_pte_at(src, addr, src_pte, entry);
3150                         }
3151                         set_huge_pte_at(dst, addr, dst_pte, entry);
3152                 } else {
3153                         if (cow) {
3154                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3155                                 mmu_notifier_invalidate_range(src, mmun_start,
3156                                                                    mmun_end);
3157                         }
3158                         entry = huge_ptep_get(src_pte);
3159                         ptepage = pte_page(entry);
3160                         get_page(ptepage);
3161                         page_dup_rmap(ptepage, true);
3162                         set_huge_pte_at(dst, addr, dst_pte, entry);
3163                         hugetlb_count_add(pages_per_huge_page(h), dst);
3164                 }
3165                 spin_unlock(src_ptl);
3166                 spin_unlock(dst_ptl);
3167         }
3168
3169         if (cow)
3170                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3171
3172         return ret;
3173 }
3174
3175 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3176                             unsigned long start, unsigned long end,
3177                             struct page *ref_page)
3178 {
3179         int force_flush = 0;
3180         struct mm_struct *mm = vma->vm_mm;
3181         unsigned long address;
3182         pte_t *ptep;
3183         pte_t pte;
3184         spinlock_t *ptl;
3185         struct page *page;
3186         struct hstate *h = hstate_vma(vma);
3187         unsigned long sz = huge_page_size(h);
3188         const unsigned long mmun_start = start; /* For mmu_notifiers */
3189         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3190
3191         WARN_ON(!is_vm_hugetlb_page(vma));
3192         BUG_ON(start & ~huge_page_mask(h));
3193         BUG_ON(end & ~huge_page_mask(h));
3194
3195         tlb_start_vma(tlb, vma);
3196         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3197         address = start;
3198 again:
3199         for (; address < end; address += sz) {
3200                 ptep = huge_pte_offset(mm, address);
3201                 if (!ptep)
3202                         continue;
3203
3204                 ptl = huge_pte_lock(h, mm, ptep);
3205                 if (huge_pmd_unshare(mm, &address, ptep))
3206                         goto unlock;
3207
3208                 pte = huge_ptep_get(ptep);
3209                 if (huge_pte_none(pte))
3210                         goto unlock;
3211
3212                 /*
3213                  * A migrating or HWPoisoned hugepage is already
3214                  * unmapped and its refcount is dropped, so just clear pte here.
3215                  */
3216                 if (unlikely(!pte_present(pte))) {
3217                         huge_pte_clear(mm, address, ptep);
3218                         goto unlock;
3219                 }
3220
3221                 page = pte_page(pte);
3222                 /*
3223                  * If a reference page is supplied, it is because a specific
3224                  * page is being unmapped, not a range. Ensure the page we
3225                  * are about to unmap is the actual page of interest.
3226                  */
3227                 if (ref_page) {
3228                         if (page != ref_page)
3229                                 goto unlock;
3230
3231                         /*
3232                          * Mark the VMA as having unmapped its page so that
3233                          * future faults in this VMA will fail rather than
3234                          * looking like data was lost.
3235                          */
3236                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3237                 }
3238
3239                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3240                 tlb_remove_tlb_entry(tlb, ptep, address);
3241                 if (huge_pte_dirty(pte))
3242                         set_page_dirty(page);
3243
3244                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3245                 page_remove_rmap(page, true);
3246                 force_flush = !__tlb_remove_page(tlb, page);
3247                 if (force_flush) {
3248                         address += sz;
3249                         spin_unlock(ptl);
3250                         break;
3251                 }
3252                 /* Bail out after unmapping reference page if supplied */
3253                 if (ref_page) {
3254                         spin_unlock(ptl);
3255                         break;
3256                 }
3257 unlock:
3258                 spin_unlock(ptl);
3259         }
3260         /*
3261          * If mmu_gather ran out of room to batch pages, we break out of
3262          * the PTE lock to avoid doing the potentially expensive TLB
3263          * invalidate and page-free while holding it.
3264          */
3265         if (force_flush) {
3266                 force_flush = 0;
3267                 tlb_flush_mmu(tlb);
3268                 if (address < end && !ref_page)
3269                         goto again;
3270         }
3271         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3272         tlb_end_vma(tlb, vma);
3273 }
3274
3275 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3276                           struct vm_area_struct *vma, unsigned long start,
3277                           unsigned long end, struct page *ref_page)
3278 {
3279         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3280
3281         /*
3282          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3283          * test will fail on a vma being torn down, and not grab a page table
3284          * on its way out.  We're lucky that the flag has such an appropriate
3285          * name, and can in fact be safely cleared here. We could clear it
3286          * before the __unmap_hugepage_range above, but all that's necessary
3287          * is to clear it before releasing the i_mmap_rwsem. This works
3288          * because in the context this is called, the VMA is about to be
3289          * destroyed and the i_mmap_rwsem is held.
3290          */
3291         vma->vm_flags &= ~VM_MAYSHARE;
3292 }
3293
3294 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3295                           unsigned long end, struct page *ref_page)
3296 {
3297         struct mm_struct *mm;
3298         struct mmu_gather tlb;
3299
3300         mm = vma->vm_mm;
3301
3302         tlb_gather_mmu(&tlb, mm, start, end);
3303         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3304         tlb_finish_mmu(&tlb, start, end);
3305 }
3306
3307 /*
3308  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3309  * mapping it owns the reserve page for. The intention is to unmap the page
3310  * from other VMAs and let the children be SIGKILLed if they are faulting the
3311  * same region.
3312  */
3313 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3314                               struct page *page, unsigned long address)
3315 {
3316         struct hstate *h = hstate_vma(vma);
3317         struct vm_area_struct *iter_vma;
3318         struct address_space *mapping;
3319         pgoff_t pgoff;
3320
3321         /*
3322          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3323          * from page cache lookup which is in HPAGE_SIZE units.
3324          */
3325         address = address & huge_page_mask(h);
3326         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3327                         vma->vm_pgoff;
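        /*
         * Worked example (a hypothetical layout, assuming 2MB huge pages
         * and vm_pgoff == 0): a fault at vma->vm_start + 4MB gives
         * pgoff = 4MB >> PAGE_SHIFT = 1024, in PAGE_SIZE units as required
         * by the interval tree lookup below.
         */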
3328         mapping = file_inode(vma->vm_file)->i_mapping;
3329
3330         /*
3331          * Take the mapping lock for the duration of the table walk. As
3332          * this mapping should be shared between all the VMAs,
3333          * __unmap_hugepage_range() is used here because the lock is already held.
3334          */
3335         i_mmap_lock_write(mapping);
3336         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3337                 /* Do not unmap the current VMA */
3338                 if (iter_vma == vma)
3339                         continue;
3340
3341                 /*
3342                  * Shared VMAs have their own reserves and do not affect
3343                  * MAP_PRIVATE accounting but it is possible that a shared
3344                  * VMA is using the same page so check and skip such VMAs.
3345                  */
3346                 if (iter_vma->vm_flags & VM_MAYSHARE)
3347                         continue;
3348
3349                 /*
3350                  * Unmap the page from other VMAs without their own reserves.
3351                  * They get marked to be SIGKILLed if they fault in these
3352                  * areas. This is because a future no-page fault on this VMA
3353                  * could insert a zeroed page instead of the data existing
3354          * from the time of fork. This would look like data corruption.
3355                  */
3356                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3357                         unmap_hugepage_range(iter_vma, address,
3358                                              address + huge_page_size(h), page);
3359         }
3360         i_mmap_unlock_write(mapping);
3361 }
3362
3363 /*
3364  * hugetlb_cow() should be called with the page lock of the original hugepage held.
3365  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3366  * cannot race with other handlers or page migration.
3367  * Keep the pte_same checks anyway to make transition from the mutex easier.
3368  */
3369 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3370                         unsigned long address, pte_t *ptep, pte_t pte,
3371                         struct page *pagecache_page, spinlock_t *ptl)
3372 {
3373         struct hstate *h = hstate_vma(vma);
3374         struct page *old_page, *new_page;
3375         int ret = 0, outside_reserve = 0;
3376         unsigned long mmun_start;       /* For mmu_notifiers */
3377         unsigned long mmun_end;         /* For mmu_notifiers */
3378
3379         old_page = pte_page(pte);
3380
3381 retry_avoidcopy:
3382         /* If no-one else is actually using this page, avoid the copy
3383          * and just make the page writable */
3384         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3385                 page_move_anon_rmap(old_page, vma, address);
3386                 set_huge_ptep_writable(vma, address, ptep);
3387                 return 0;
3388         }
3389
3390         /*
3391          * If the process that created a MAP_PRIVATE mapping is about to
3392          * perform a COW due to a shared page count, attempt to satisfy
3393          * the allocation without using the existing reserves. The pagecache
3394          * page is used to determine if the reserve at this address was
3395          * consumed or not. If reserves were used, a partial faulted mapping
3396          * at the time of fork() could consume its reserves on COW instead
3397          * of the full address range.
3398          */
3399         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3400                         old_page != pagecache_page)
3401                 outside_reserve = 1;
3402
3403         get_page(old_page);
3404
3405         /*
3406          * Drop page table lock as buddy allocator may be called. It will
3407          * be acquired again before returning to the caller, as expected.
3408          */
3409         spin_unlock(ptl);
3410         new_page = alloc_huge_page(vma, address, outside_reserve);
3411
3412         if (IS_ERR(new_page)) {
3413                 /*
3414                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3415                  * it is due to references held by a child and an insufficient
3416                  * huge page pool. To guarantee the original mapper's
3417                  * reliability, unmap the page from child processes. The child
3418                  * may get SIGKILLed if it later faults.
3419                  */
3420                 if (outside_reserve) {
3421                         put_page(old_page);
3422                         BUG_ON(huge_pte_none(pte));
3423                         unmap_ref_private(mm, vma, old_page, address);
3424                         BUG_ON(huge_pte_none(pte));
3425                         spin_lock(ptl);
3426                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3427                         if (likely(ptep &&
3428                                    pte_same(huge_ptep_get(ptep), pte)))
3429                                 goto retry_avoidcopy;
3430                         /*
3431                          * A race occurred while re-acquiring the page
3432                          * table lock, and our job is done.
3433                          */
3434                         return 0;
3435                 }
3436
3437                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3438                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3439                 goto out_release_old;
3440         }
3441
3442         /*
3443          * When the original hugepage is a shared one, it does not have
3444          * an anon_vma prepared.
3445          */
3446         if (unlikely(anon_vma_prepare(vma))) {
3447                 ret = VM_FAULT_OOM;
3448                 goto out_release_all;
3449         }
3450
3451         copy_user_huge_page(new_page, old_page, address, vma,
3452                             pages_per_huge_page(h));
3453         __SetPageUptodate(new_page);
3454         set_page_huge_active(new_page);
3455
3456         mmun_start = address & huge_page_mask(h);
3457         mmun_end = mmun_start + huge_page_size(h);
3458         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3459
3460         /*
3461          * Retake the page table lock to check for racing updates
3462          * before the page tables are altered
3463          */
3464         spin_lock(ptl);
3465         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3466         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3467                 ClearPagePrivate(new_page);
3468
3469                 /* Break COW */
3470                 huge_ptep_clear_flush(vma, address, ptep);
3471                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3472                 set_huge_pte_at(mm, address, ptep,
3473                                 make_huge_pte(vma, new_page, 1));
3474                 page_remove_rmap(old_page, true);
3475                 hugepage_add_new_anon_rmap(new_page, vma, address);
3476                 /* Make the old page be freed below */
3477                 new_page = old_page;
3478         }
3479         spin_unlock(ptl);
3480         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3481 out_release_all:
3482         put_page(new_page);
3483 out_release_old:
3484         put_page(old_page);
3485
3486         spin_lock(ptl); /* Caller expects lock to be held */
3487         return ret;
3488 }
3489
3490 /* Return the pagecache page at a given address within a VMA */
3491 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3492                         struct vm_area_struct *vma, unsigned long address)
3493 {
3494         struct address_space *mapping;
3495         pgoff_t idx;
3496
3497         mapping = vma->vm_file->f_mapping;
3498         idx = vma_hugecache_offset(h, vma, address);
3499
3500         return find_lock_page(mapping, idx);
3501 }
3502
3503 /*
3504  * Return whether there is a pagecache page to back the given address within VMA.
3505  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3506  */
3507 static bool hugetlbfs_pagecache_present(struct hstate *h,
3508                         struct vm_area_struct *vma, unsigned long address)
3509 {
3510         struct address_space *mapping;
3511         pgoff_t idx;
3512         struct page *page;
3513
3514         mapping = vma->vm_file->f_mapping;
3515         idx = vma_hugecache_offset(h, vma, address);
3516
3517         page = find_get_page(mapping, idx);
3518         if (page)
3519                 put_page(page);
3520         return page != NULL;
3521 }
3522
3523 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3524                            pgoff_t idx)
3525 {
3526         struct inode *inode = mapping->host;
3527         struct hstate *h = hstate_inode(inode);
3528         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3529
3530         if (err)
3531                 return err;
3532         ClearPagePrivate(page);
3533
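        /*
         * Charge the page to inode->i_blocks.  blocks_per_huge_page()
         * counts in 512-byte sectors, so e.g. a 2MB hugepage adds 4096
         * blocks.
         */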
3534         spin_lock(&inode->i_lock);
3535         inode->i_blocks += blocks_per_huge_page(h);
3536         spin_unlock(&inode->i_lock);
3537         return 0;
3538 }
3539
3540 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3541                            struct address_space *mapping, pgoff_t idx,
3542                            unsigned long address, pte_t *ptep, unsigned int flags)
3543 {
3544         struct hstate *h = hstate_vma(vma);
3545         int ret = VM_FAULT_SIGBUS;
3546         int anon_rmap = 0;
3547         unsigned long size;
3548         struct page *page;
3549         pte_t new_pte;
3550         spinlock_t *ptl;
3551
3552         /*
3553          * Currently, we are forced to kill the process in the event the
3554          * original mapper has unmapped pages from the child due to a failed
3555          * COW. Warn that such a situation has occurred as it may not be obvious.
3556          */
3557         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3558                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3559                            current->pid);
3560                 return ret;
3561         }
3562
3563         /*
3564          * Use page lock to guard against racing truncation
3565          * before we get page_table_lock.
3566          */
3567 retry:
3568         page = find_lock_page(mapping, idx);
3569         if (!page) {
3570                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3571                 if (idx >= size)
3572                         goto out;
3573                 page = alloc_huge_page(vma, address, 0);
3574                 if (IS_ERR(page)) {
3575                         ret = PTR_ERR(page);
3576                         if (ret == -ENOMEM)
3577                                 ret = VM_FAULT_OOM;
3578                         else
3579                                 ret = VM_FAULT_SIGBUS;
3580                         goto out;
3581                 }
3582                 clear_huge_page(page, address, pages_per_huge_page(h));
3583                 __SetPageUptodate(page);
3584                 set_page_huge_active(page);
3585
3586                 if (vma->vm_flags & VM_MAYSHARE) {
3587                         int err = huge_add_to_page_cache(page, mapping, idx);
3588                         if (err) {
3589                                 put_page(page);
3590                                 if (err == -EEXIST)
3591                                         goto retry;
3592                                 goto out;
3593                         }
3594                 } else {
3595                         lock_page(page);
3596                         if (unlikely(anon_vma_prepare(vma))) {
3597                                 ret = VM_FAULT_OOM;
3598                                 goto backout_unlocked;
3599                         }
3600                         anon_rmap = 1;
3601                 }
3602         } else {
3603                 /*
3604                  * If a memory error occurred between mmap() and fault, some
3605                  * processes may not have a hwpoisoned swap entry for the errored
3606                  * virtual address, so block the hugepage fault on the PG_hwpoison bit.
3607                  */
3608                 if (unlikely(PageHWPoison(page))) {
3609                         ret = VM_FAULT_HWPOISON |
3610                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3611                         goto backout_unlocked;
3612                 }
3613         }
3614
3615         /*
3616          * If we are going to COW a private mapping later, we examine the
3617          * pending reservations for this page now. This will ensure that
3618          * any allocations necessary to record that reservation occur outside
3619          * the spinlock.
3620          */
3621         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3622                 if (vma_needs_reservation(h, vma, address) < 0) {
3623                         ret = VM_FAULT_OOM;
3624                         goto backout_unlocked;
3625                 }
3626                 /* Just decrements count, does not deallocate */
3627                 vma_end_reservation(h, vma, address);
3628         }
3629
3630         ptl = huge_pte_lockptr(h, mm, ptep);
3631         spin_lock(ptl);
3632         size = i_size_read(mapping->host) >> huge_page_shift(h);
3633         if (idx >= size)
3634                 goto backout;
3635
3636         ret = 0;
3637         if (!huge_pte_none(huge_ptep_get(ptep)))
3638                 goto backout;
3639
3640         if (anon_rmap) {
3641                 ClearPagePrivate(page);
3642                 hugepage_add_new_anon_rmap(page, vma, address);
3643         } else
3644                 page_dup_rmap(page, true);
3645         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3646                                 && (vma->vm_flags & VM_SHARED)));
3647         set_huge_pte_at(mm, address, ptep, new_pte);
3648
3649         hugetlb_count_add(pages_per_huge_page(h), mm);
3650         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3651                 /* Optimization, do the COW without a second fault */
3652                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3653         }
3654
3655         spin_unlock(ptl);
3656         unlock_page(page);
3657 out:
3658         return ret;
3659
3660 backout:
3661         spin_unlock(ptl);
3662 backout_unlocked:
3663         unlock_page(page);
3664         put_page(page);
3665         goto out;
3666 }
3667
3668 #ifdef CONFIG_SMP
3669 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3670                             struct vm_area_struct *vma,
3671                             struct address_space *mapping,
3672                             pgoff_t idx, unsigned long address)
3673 {
3674         unsigned long key[2];
3675         u32 hash;
3676
3677         if (vma->vm_flags & VM_SHARED) {
3678                 key[0] = (unsigned long) mapping;
3679                 key[1] = idx;
3680         } else {
3681                 key[0] = (unsigned long) mm;
3682                 key[1] = address >> huge_page_shift(h);
3683         }
3684
3685         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3686
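        /*
         * num_fault_mutexes is sized to a power of two, so masking with
         * (num_fault_mutexes - 1) is a cheap modulo.
         */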
3687         return hash & (num_fault_mutexes - 1);
3688 }
3689 #else
3690 /*
3691  * For uniprocessor systems we always use a single mutex, so just
3692  * return 0 and avoid the hashing overhead.
3693  */
3694 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3695                             struct vm_area_struct *vma,
3696                             struct address_space *mapping,
3697                             pgoff_t idx, unsigned long address)
3698 {
3699         return 0;
3700 }
3701 #endif
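
/*
 * Typical usage, as in hugetlb_fault() below: hash the fault parameters to
 * pick one mutex out of the table, serializing faults that could race on
 * the same logical page.
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... instantiate or handle the page ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */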
3702
3703 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3704                         unsigned long address, unsigned int flags)
3705 {
3706         pte_t *ptep, entry;
3707         spinlock_t *ptl;
3708         int ret;
3709         u32 hash;
3710         pgoff_t idx;
3711         struct page *page = NULL;
3712         struct page *pagecache_page = NULL;
3713         struct hstate *h = hstate_vma(vma);
3714         struct address_space *mapping;
3715         int need_wait_lock = 0;
3716
3717         address &= huge_page_mask(h);
3718
3719         ptep = huge_pte_offset(mm, address);
3720         if (ptep) {
3721                 entry = huge_ptep_get(ptep);
3722                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3723                         migration_entry_wait_huge(vma, mm, ptep);
3724                         return 0;
3725                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3726                         return VM_FAULT_HWPOISON_LARGE |
3727                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3728         } else {
3729                 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3730                 if (!ptep)
3731                         return VM_FAULT_OOM;
3732         }
3733
3734         mapping = vma->vm_file->f_mapping;
3735         idx = vma_hugecache_offset(h, vma, address);
3736
3737         /*
3738          * Serialize hugepage allocation and instantiation, so that we don't
3739          * get spurious allocation failures if two CPUs race to instantiate
3740          * the same page in the page cache.
3741          */
3742         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3743         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3744
3745         entry = huge_ptep_get(ptep);
3746         if (huge_pte_none(entry)) {
3747                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3748                 goto out_mutex;
3749         }
3750
3751         ret = 0;
3752
3753         /*
3754          * entry could be a migration/hwpoison entry at this point, so this
3755          * check prevents the code below from assuming that we have an
3756          * active hugepage in the pagecache. This goto expects a second page
3757          * fault, where the is_hugetlb_entry_(migration|hwpoisoned) check
3758          * will handle it properly.
3759          */
3760         if (!pte_present(entry))
3761                 goto out_mutex;
3762
3763         /*
3764          * If we are going to COW the mapping later, we examine the pending
3765          * reservations for this page now. This will ensure that any
3766          * allocations necessary to record that reservation occur outside the
3767          * spinlock. For private mappings, we also lookup the pagecache
3768          * page now as it is used to determine if a reservation has been
3769          * consumed.
3770          */
3771         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3772                 if (vma_needs_reservation(h, vma, address) < 0) {
3773                         ret = VM_FAULT_OOM;
3774                         goto out_mutex;
3775                 }
3776                 /* Just decrements count, does not deallocate */
3777                 vma_end_reservation(h, vma, address);
3778
3779                 if (!(vma->vm_flags & VM_MAYSHARE))
3780                         pagecache_page = hugetlbfs_pagecache_page(h,
3781                                                                 vma, address);
3782         }
3783
3784         ptl = huge_pte_lock(h, mm, ptep);
3785
3786         /* Check for a racing update before calling hugetlb_cow */
3787         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3788                 goto out_ptl;
3789
3790         /*
3791          * hugetlb_cow() requires page locks of pte_page(entry) and
3792          * pagecache_page, so here we need to take the former one
3793          * when page != pagecache_page or !pagecache_page.
3794          */
3795         page = pte_page(entry);
3796         if (page != pagecache_page)
3797                 if (!trylock_page(page)) {
3798                         need_wait_lock = 1;
3799                         goto out_ptl;
3800                 }
3801
3802         get_page(page);
3803
3804         if (flags & FAULT_FLAG_WRITE) {
3805                 if (!huge_pte_write(entry)) {
3806                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3807                                         pagecache_page, ptl);
3808                         goto out_put_page;
3809                 }
3810                 entry = huge_pte_mkdirty(entry);
3811         }
3812         entry = pte_mkyoung(entry);
3813         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3814                                                 flags & FAULT_FLAG_WRITE))
3815                 update_mmu_cache(vma, address, ptep);
3816 out_put_page:
3817         if (page != pagecache_page)
3818                 unlock_page(page);
3819         put_page(page);
3820 out_ptl:
3821         spin_unlock(ptl);
3822
3823         if (pagecache_page) {
3824                 unlock_page(pagecache_page);
3825                 put_page(pagecache_page);
3826         }
3827 out_mutex:
3828         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3829         /*
3830          * Generally it's safe to hold a refcount while waiting on a page lock.
3831          * But here we only wait to defer the next page fault and avoid a busy
3832          * loop; the page is not used after it is unlocked and before we return
3833          * from the current page fault. So we are safe from accessing a freed
3834          * page, even though we wait here without taking a refcount.
3835          */
3836         if (need_wait_lock)
3837                 wait_on_page_locked(page);
3838         return ret;
3839 }
3840
3841 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3842                          struct page **pages, struct vm_area_struct **vmas,
3843                          unsigned long *position, unsigned long *nr_pages,
3844                          long i, unsigned int flags)
3845 {
3846         unsigned long pfn_offset;
3847         unsigned long vaddr = *position;
3848         unsigned long remainder = *nr_pages;
3849         struct hstate *h = hstate_vma(vma);
3850
3851         while (vaddr < vma->vm_end && remainder) {
3852                 pte_t *pte;
3853                 spinlock_t *ptl = NULL;
3854                 int absent;
3855                 struct page *page;
3856
3857                 /*
3858                  * If we have a pending SIGKILL, don't keep faulting pages and
3859                  * potentially allocating memory.
3860                  */
3861                 if (unlikely(fatal_signal_pending(current))) {
3862                         remainder = 0;
3863                         break;
3864                 }
3865
3866                 /*
3867                  * Some archs (sparc64, sh*) have multiple pte_ts for
3868                  * each hugepage.  We have to make sure we get the
3869                  * first, for the page indexing below to work.
3870                  *
3871                  * Note that page table lock is not held when pte is null.
3872                  */
3873                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3874                 if (pte)
3875                         ptl = huge_pte_lock(h, mm, pte);
3876                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3877
3878                 /*
3879                  * When coredumping, it suits get_dump_page if we just return
3880                  * an error where there's an empty slot with no huge pagecache
3881                  * to back it.  This way, we avoid allocating a hugepage, and
3882                  * the sparse dumpfile avoids allocating disk blocks, but its
3883                  * huge holes still show up with zeroes where they need to be.
3884                  */
3885                 if (absent && (flags & FOLL_DUMP) &&
3886                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3887                         if (pte)
3888                                 spin_unlock(ptl);
3889                         remainder = 0;
3890                         break;
3891                 }
3892
3893                 /*
3894                  * We need to call hugetlb_fault for both hugepages under
3895                  * migration (in which case hugetlb_fault waits for the
3896                  * migration) and hwpoisoned hugepages (in which case we need
3897                  * to prevent the caller from accessing them). To do this, we
3898                  * use is_swap_pte here instead of is_hugetlb_entry_migration
3899                  * and is_hugetlb_entry_hwpoisoned, because it covers both
3900                  * cases and because we can't follow correct pages directly
3901                  * from any kind of swap entry.
3902                  */
3903                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3904                     ((flags & FOLL_WRITE) &&
3905                       !huge_pte_write(huge_ptep_get(pte)))) {
3906                         int ret;
3907
3908                         if (pte)
3909                                 spin_unlock(ptl);
3910                         ret = hugetlb_fault(mm, vma, vaddr,
3911                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3912                         if (!(ret & VM_FAULT_ERROR))
3913                                 continue;
3914
3915                         remainder = 0;
3916                         break;
3917                 }
3918
3919                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3920                 page = pte_page(huge_ptep_get(pte));
3921 same_page:
3922                 if (pages) {
3923                         pages[i] = mem_map_offset(page, pfn_offset);
3924                         get_page(pages[i]);
3925                 }
3926
3927                 if (vmas)
3928                         vmas[i] = vma;
3929
3930                 vaddr += PAGE_SIZE;
3931                 ++pfn_offset;
3932                 --remainder;
3933                 ++i;
3934                 if (vaddr < vma->vm_end && remainder &&
3935                                 pfn_offset < pages_per_huge_page(h)) {
3936                         /*
3937                          * We use pfn_offset to avoid touching the pageframes
3938                          * of this compound page.
3939                          */
3940                         goto same_page;
3941                 }
3942                 spin_unlock(ptl);
3943         }
3944         *nr_pages = remainder;
3945         *position = vaddr;
3946
3947         return i ? i : -EFAULT;
3948 }
3949
3950 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3951                 unsigned long address, unsigned long end, pgprot_t newprot)
3952 {
3953         struct mm_struct *mm = vma->vm_mm;
3954         unsigned long start = address;
3955         pte_t *ptep;
3956         pte_t pte;
3957         struct hstate *h = hstate_vma(vma);
3958         unsigned long pages = 0;
3959
3960         BUG_ON(address >= end);
3961         flush_cache_range(vma, address, end);
3962
3963         mmu_notifier_invalidate_range_start(mm, start, end);
3964         i_mmap_lock_write(vma->vm_file->f_mapping);
3965         for (; address < end; address += huge_page_size(h)) {
3966                 spinlock_t *ptl;
3967                 ptep = huge_pte_offset(mm, address);
3968                 if (!ptep)
3969                         continue;
3970                 ptl = huge_pte_lock(h, mm, ptep);
3971                 if (huge_pmd_unshare(mm, &address, ptep)) {
3972                         pages++;
3973                         spin_unlock(ptl);
3974                         continue;
3975                 }
3976                 pte = huge_ptep_get(ptep);
3977                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3978                         spin_unlock(ptl);
3979                         continue;
3980                 }
3981                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3982                         swp_entry_t entry = pte_to_swp_entry(pte);
3983
3984                         if (is_write_migration_entry(entry)) {
3985                                 pte_t newpte;
3986
3987                                 make_migration_entry_read(&entry);
3988                                 newpte = swp_entry_to_pte(entry);
3989                                 set_huge_pte_at(mm, address, ptep, newpte);
3990                                 pages++;
3991                         }
3992                         spin_unlock(ptl);
3993                         continue;
3994                 }
3995                 if (!huge_pte_none(pte)) {
3996                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3997                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3998                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3999                         set_huge_pte_at(mm, address, ptep, pte);
4000                         pages++;
4001                 }
4002                 spin_unlock(ptl);
4003         }
4004         /*
4005          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4006          * may have cleared our pud entry and done put_page on the page table:
4007          * once we release i_mmap_rwsem, another task can do the final put_page
4008          * and that page table be reused and filled with junk.
4009          */
4010         flush_tlb_range(vma, start, end);
4011         mmu_notifier_invalidate_range(mm, start, end);
4012         i_mmap_unlock_write(vma->vm_file->f_mapping);
4013         mmu_notifier_invalidate_range_end(mm, start, end);
4014
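        /*
         * The return value is in base (PAGE_SIZE) pages: each changed huge
         * pte accounts for 1 << h->order base pages.
         */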
4015         return pages << h->order;
4016 }
4017
4018 int hugetlb_reserve_pages(struct inode *inode,
4019                                         long from, long to,
4020                                         struct vm_area_struct *vma,
4021                                         vm_flags_t vm_flags)
4022 {
4023         long ret, chg;
4024         struct hstate *h = hstate_inode(inode);
4025         struct hugepage_subpool *spool = subpool_inode(inode);
4026         struct resv_map *resv_map;
4027         long gbl_reserve;
4028
4029         /*
4030          * Only apply hugepage reservation if asked. At fault time, an
4031          * attempt will be made for VM_NORESERVE to allocate a page
4032          * without using reserves.
4033          */
4034         if (vm_flags & VM_NORESERVE)
4035                 return 0;
4036
4037         /*
4038          * Shared mappings base their reservation on the number of pages that
4039          * are already allocated on behalf of the file. Private mappings need
4040          * to reserve the full area even if read-only as mprotect() may be
4041          * called to make the mapping read-write. Assume !vma is a shm mapping.
4042          */
4043         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4044                 resv_map = inode_resv_map(inode);
4045
4046                 chg = region_chg(resv_map, from, to);
4047
4048         } else {
4049                 resv_map = resv_map_alloc();
4050                 if (!resv_map)
4051                         return -ENOMEM;
4052
4053                 chg = to - from;
4054
4055                 set_vma_resv_map(vma, resv_map);
4056                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4057         }
4058
4059         if (chg < 0) {
4060                 ret = chg;
4061                 goto out_err;
4062         }
4063
4064         /*
4065          * There must be enough pages in the subpool for the mapping. If
4066          * the subpool has a minimum size, there may be some global
4067          * reservations already in place (gbl_reserve).
4068          */
4069         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4070         if (gbl_reserve < 0) {
4071                 ret = -ENOSPC;
4072                 goto out_err;
4073         }
4074
4075         /*
4076          * Check that enough hugepages are available for the reservation.
4077          * Hand the pages back to the subpool if there are not.
4078          */
4079         ret = hugetlb_acct_memory(h, gbl_reserve);
4080         if (ret < 0) {
4081                 /* put back original number of pages, chg */
4082                 (void)hugepage_subpool_put_pages(spool, chg);
4083                 goto out_err;
4084         }
4085
4086         /*
4087          * Account for the reservations made. Shared mappings record regions
4088          * that have reservations as they are shared by multiple VMAs.
4089          * When the last VMA disappears, the region map says how much
4090          * the reservation was and the page cache tells how much of
4091          * the reservation was consumed. Private mappings are per-VMA and
4092          * only the consumed reservations are tracked. When the VMA
4093          * disappears, the original reservation is the VMA size and the
4094          * consumed reservations are stored in the map. Hence, nothing
4095          * else has to be done for private mappings here.
4096          */
4097         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4098                 long add = region_add(resv_map, from, to);
4099
4100                 if (unlikely(chg > add)) {
4101                         /*
4102                          * pages in this range were added to the reserve
4103                          * map between region_chg and region_add.  This
4104                          * indicates a race with alloc_huge_page.  Adjust
4105                          * the subpool and reserve counts modified above
4106                          * based on the difference.
4107                          */
4108                         long rsv_adjust;
4109
4110                         rsv_adjust = hugepage_subpool_put_pages(spool,
4111                                                                 chg - add);
4112                         hugetlb_acct_memory(h, -rsv_adjust);
4113                 }
4114         }
4115         return 0;
4116 out_err:
4117         if (!vma || vma->vm_flags & VM_MAYSHARE)
4118                 region_abort(resv_map, from, to);
4119         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4120                 kref_put(&resv_map->refs, resv_map_release);
4121         return ret;
4122 }
4123
4124 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4125                                                                 long freed)
4126 {
4127         struct hstate *h = hstate_inode(inode);
4128         struct resv_map *resv_map = inode_resv_map(inode);
4129         long chg = 0;
4130         struct hugepage_subpool *spool = subpool_inode(inode);
4131         long gbl_reserve;
4132
4133         if (resv_map) {
4134                 chg = region_del(resv_map, start, end);
4135                 /*
4136                  * region_del() can fail in the rare case where a region
4137                  * must be split and another region descriptor cannot be
4138                  * allocated.  If end == LONG_MAX, it will not fail.
4139                  */
4140                 if (chg < 0)
4141                         return chg;
4142         }
4143
4144         spin_lock(&inode->i_lock);
4145         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4146         spin_unlock(&inode->i_lock);
4147
4148         /*
4149          * If the subpool has a minimum size, the number of global
4150          * reservations to be released may be adjusted.
4151          */
4152         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4153         hugetlb_acct_memory(h, -gbl_reserve);
4154
4155         return 0;
4156 }
4157
4158 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4159 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4160                                 struct vm_area_struct *vma,
4161                                 unsigned long addr, pgoff_t idx)
4162 {
4163         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4164                                 svma->vm_start;
4165         unsigned long sbase = saddr & PUD_MASK;
4166         unsigned long s_end = sbase + PUD_SIZE;
4167
4168         /* Allow segments to share if only one is marked locked */
4169         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4170         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4171
4172         /*
4173          * Match the virtual addresses, permissions and the alignment of the
4174          * page table page.
4175          */
4176         if (pmd_index(addr) != pmd_index(saddr) ||
4177             vm_flags != svm_flags ||
4178             sbase < svma->vm_start || svma->vm_end < s_end)
4179                 return 0;
4180
4181         return saddr;
4182 }
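
/*
 * Worked example, assuming x86-64 where PUD_SIZE is 1GB: for svma to share
 * a pmd page with vma, addr and the corresponding saddr must fall at the
 * same pmd_index within their 1GB regions, the VM_LOCKED-adjusted vm_flags
 * must match, and svma must span the entire PUD-aligned range
 * [sbase, s_end) so the shared page table covers only valid mappings.
 */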
4183
4184 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4185 {
4186         unsigned long base = addr & PUD_MASK;
4187         unsigned long end = base + PUD_SIZE;
4188
4189         /*
4190          * Check for proper vm_flags and page table alignment.
4191          */
4192         if (vma->vm_flags & VM_MAYSHARE &&
4193             vma->vm_start <= base && end <= vma->vm_end)
4194                 return true;
4195         return false;
4196 }
4197
4198 /*
4199  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4200  * and returns the corresponding pte. While this is not necessary for the
4201  * !shared pmd case because we can allocate the pmd later as well, it makes the
4202  * code much cleaner. pmd allocation is essential for the shared case because
4203  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4204  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4205  * bad pmd for sharing.
4206  */
4207 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4208 {
4209         struct vm_area_struct *vma = find_vma(mm, addr);
4210         struct address_space *mapping = vma->vm_file->f_mapping;
4211         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4212                         vma->vm_pgoff;
4213         struct vm_area_struct *svma;
4214         unsigned long saddr;
4215         pte_t *spte = NULL;
4216         pte_t *pte;
4217         spinlock_t *ptl;
4218
4219         if (!vma_shareable(vma, addr))
4220                 return (pte_t *)pmd_alloc(mm, pud, addr);
4221
4222         i_mmap_lock_write(mapping);
4223         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4224                 if (svma == vma)
4225                         continue;
4226
4227                 saddr = page_table_shareable(svma, vma, addr, idx);
4228                 if (saddr) {
4229                         spte = huge_pte_offset(svma->vm_mm, saddr);
4230                         if (spte) {
4231                                 mm_inc_nr_pmds(mm);
4232                                 get_page(virt_to_page(spte));
4233                                 break;
4234                         }
4235                 }
4236         }
4237
4238         if (!spte)
4239                 goto out;
4240
4241         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4242         spin_lock(ptl);
4243         if (pud_none(*pud)) {
4244                 pud_populate(mm, pud,
4245                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4246         } else {
4247                 put_page(virt_to_page(spte));
4248                 mm_inc_nr_pmds(mm);
4249         }
4250         spin_unlock(ptl);
4251 out:
4252         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4253         i_mmap_unlock_write(mapping);
4254         return pte;
4255 }
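/*
 * Minimal sketch, plain C rather than kernel code: the refcount protocol on
 * a shared pmd page. huge_pmd_share() above takes one reference per mm that
 * attaches (or drops the speculative one if it lost the pud_populate race),
 * and huge_pmd_unshare() below drops one reference per detaching mm. All
 * names here are invented for illustration.
 */
#include <stdio.h>

struct pmd_page_model {
	int refcount;		/* stands in for page_count() of the pmd page */
};

static void model_share(struct pmd_page_model *p)
{
	p->refcount++;		/* get_page(virt_to_page(spte)) */
}

static int model_unshare(struct pmd_page_model *p)
{
	if (p->refcount == 1)
		return 0;	/* not shared: caller tears down the pte itself */
	p->refcount--;		/* put_page(virt_to_page(ptep)) */
	return 1;
}

int main(void)
{
	struct pmd_page_model p = { .refcount = 1 };	/* first mapper */

	model_share(&p);				/* second mm attaches */
	printf("unshare -> %d, refcount now %d\n", model_unshare(&p), p.refcount);
	printf("unshare -> %d, refcount now %d\n", model_unshare(&p), p.refcount);
	return 0;
}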
4256
4257 /*
4258  * Unmap a huge page backed by a shared pte.
4259  *
4260  * The hugetlb pte page is refcounted at map time.  If the pte is shared,
4261  * indicated by page_count > 1, unmapping is achieved by clearing the pud and
4262  * decrementing the refcount. If page_count == 1, the pte page is not shared.
4263  *
4264  * Called with the page table lock held.
4265  *
4266  * Returns: 1 on successfully unmapping a shared pte page
4267  *          0 if the underlying pte page is not shared, or it is the last user
4268  */
4269 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4270 {
4271         pgd_t *pgd = pgd_offset(mm, *addr);
4272         pud_t *pud = pud_offset(pgd, *addr);
4273
4274         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4275         if (page_count(virt_to_page(ptep)) == 1)
4276                 return 0;
4277
4278         pud_clear(pud);
4279         put_page(virt_to_page(ptep));
4280         mm_dec_nr_pmds(mm);
4281         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4282         return 1;
4283 }
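/*
 * Hedged sketch of the typical caller pattern (modeled on
 * __unmap_hugepage_range() earlier in this file): when huge_pmd_unshare()
 * succeeds, it rewinds *addr so that the caller's "addr += sz" step lands
 * at the start of the next range covered by a whole pmd page
 * (HPAGE_SIZE * PTRS_PER_PTE, i.e. PUD_SIZE on x86-64). The function name
 * here is invented for illustration.
 */
static void unshare_walk_sketch(struct mm_struct *mm, struct hstate *h,
				unsigned long start, unsigned long end)
{
	unsigned long sz = huge_page_size(h);
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			/* whole shared range detached in one step */
			spin_unlock(ptl);
			continue;
		}
		/* ...per-page unmap work would go here... */
		spin_unlock(ptl);
	}
}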
4284 #define want_pmd_share()        (1)
4285 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4286 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4287 {
4288         return NULL;
4289 }
4290
4291 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4292 {
4293         return 0;
4294 }
4295 #define want_pmd_share()        (0)
4296 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4297
4298 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4299 pte_t *huge_pte_alloc(struct mm_struct *mm,
4300                         unsigned long addr, unsigned long sz)
4301 {
4302         pgd_t *pgd;
4303         pud_t *pud;
4304         pte_t *pte = NULL;
4305
4306         pgd = pgd_offset(mm, addr);
4307         pud = pud_alloc(mm, pgd, addr);
4308         if (pud) {
4309                 if (sz == PUD_SIZE) {
4310                         pte = (pte_t *)pud;
4311                 } else {
4312                         BUG_ON(sz != PMD_SIZE);
4313                         if (want_pmd_share() && pud_none(*pud))
4314                                 pte = huge_pmd_share(mm, addr, pud);
4315                         else
4316                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4317                 }
4318         }
4319         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4320
4321         return pte;
4322 }
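/*
 * Worked example for huge_pte_alloc() above, assuming x86-64 sizes:
 *
 *   sz == PUD_SIZE (1G): the pud entry itself acts as the huge "pte";
 *                        no lower-level table is allocated.
 *   sz == PMD_SIZE (2M): a pmd is allocated, and if the pud is still empty
 *                        huge_pmd_share() may reuse another process's pmd
 *                        page instead of allocating a fresh one.
 */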
4323
4324 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4325 {
4326         pgd_t *pgd;
4327         pud_t *pud;
4328         pmd_t *pmd = NULL;
4329
4330         pgd = pgd_offset(mm, addr);
4331         if (pgd_present(*pgd)) {
4332                 pud = pud_offset(pgd, addr);
4333                 if (pud_present(*pud)) {
4334                         if (pud_huge(*pud))
4335                                 return (pte_t *)pud;
4336                         pmd = pmd_offset(pud, addr);
4337                 }
4338         }
4339         return (pte_t *) pmd;
4340 }
4341
4342 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4343
4344 /*
4345  * These functions can be overridden if the architecture needs its own
4346  * behavior.
4347  */
4348 struct page * __weak
4349 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4350                               int write)
4351 {
4352         return ERR_PTR(-EINVAL);
4353 }
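/*
 * Hedged example (hypothetical, not part of this file): because the stubs
 * here are __weak, an architecture can override one with a strong definition
 * in its own sources, e.g. in arch/<arch>/mm/hugetlbpage.c:
 *
 *	struct page *follow_huge_addr(struct mm_struct *mm,
 *				      unsigned long address, int write)
 *	{
 *		... arch-specific huge page-table walk ...
 *	}
 *
 * The linker then prefers the strong symbol over the __weak default.
 */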
4354
4355 struct page * __weak
4356 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4357                 pmd_t *pmd, int flags)
4358 {
4359         struct page *page = NULL;
4360         spinlock_t *ptl;
4361 retry:
4362         ptl = pmd_lockptr(mm, pmd);
4363         spin_lock(ptl);
4364         /*
4365          * Make sure that the address range covered by this pmd is not
4366          * unmapped by other threads.
4367          */
4368         if (!pmd_huge(*pmd))
4369                 goto out;
4370         if (pmd_present(*pmd)) {
4371                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4372                 if (flags & FOLL_GET)
4373                         get_page(page);
4374         } else {
4375                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4376                         spin_unlock(ptl);
4377                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4378                         goto retry;
4379                 }
4380                 /*
4381                  * hwpoisoned entry is treated as no_page_table in
4382                  * follow_page_mask().
4383                  */
4384         }
4385 out:
4386         spin_unlock(ptl);
4387         return page;
4388 }
4389
4390 struct page * __weak
4391 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4392                 pud_t *pud, int flags)
4393 {
4394         if (flags & FOLL_GET)
4395                 return NULL;
4396
4397         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4398 }
4399
4400 #ifdef CONFIG_MEMORY_FAILURE
4401
4402 /*
4403  * This function is called from the memory-failure code.
4404  * The caller is assumed to hold the page lock of the head page.
4405  */
4406 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4407 {
4408         struct hstate *h = page_hstate(hpage);
4409         int nid = page_to_nid(hpage);
4410         int ret = -EBUSY;
4411
4412         spin_lock(&hugetlb_lock);
4413         /*
4414          * Just checking !page_huge_active is not enough, because that could be
4415          * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4416          */
4417         if (!page_huge_active(hpage) && !page_count(hpage)) {
4418                 /*
4419                  * A hwpoisoned hugepage isn't linked to the activelist or freelist,
4420                  * but a dangling hpage->lru can trigger list-debug warnings
4421                  * (this happens when unpoison_memory() is called on it),
4422                  * so make it point to itself with list_del_init().
4423                  */
4424                 list_del_init(&hpage->lru);
4425                 set_page_refcounted(hpage);
4426                 h->free_huge_pages--;
4427                 h->free_huge_pages_node[nid]--;
4428                 ret = 0;
4429         }
4430         spin_unlock(&hugetlb_lock);
4431         return ret;
4432 }
4433 #endif
4434
4435 bool isolate_huge_page(struct page *page, struct list_head *list)
4436 {
4437         bool ret = true;
4438
4439         VM_BUG_ON_PAGE(!PageHead(page), page);
4440         spin_lock(&hugetlb_lock);
4441         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4442                 ret = false;
4443                 goto unlock;
4444         }
4445         clear_page_huge_active(page);
4446         list_move_tail(&page->lru, list);
4447 unlock:
4448         spin_unlock(&hugetlb_lock);
4449         return ret;
4450 }
4451
4452 void putback_active_hugepage(struct page *page)
4453 {
4454         VM_BUG_ON_PAGE(!PageHead(page), page);
4455         spin_lock(&hugetlb_lock);
4456         set_page_huge_active(page);
4457         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4458         spin_unlock(&hugetlb_lock);
4459         put_page(page);
4460 }
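/*
 * Hedged usage sketch (modeled on callers such as soft_offline_huge_page()
 * in mm/memory-failure.c): isolate a huge page onto a private list, hand
 * the list to migrate_pages(), and put back whatever was not migrated.
 * "alloc_target_page" is a hypothetical new_page_t callback supplied by
 * the caller; the function name is invented for illustration.
 */
static int migrate_one_hugepage_sketch(struct page *hpage,
				       new_page_t alloc_target_page)
{
	LIST_HEAD(pagelist);
	int ret;

	if (!isolate_huge_page(hpage, &pagelist))
		return -EBUSY;

	ret = migrate_pages(&pagelist, alloc_target_page, NULL, 0,
			    MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret)
		/* re-activates any hugepage still left on the list */
		putback_movable_pages(&pagelist);
	return ret;
}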