mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/page-isolation.h>
25 #include <linux/jhash.h>
26
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/tlb.h>
30
31 #include <linux/io.h>
32 #include <linux/hugetlb.h>
33 #include <linux/hugetlb_cgroup.h>
34 #include <linux/node.h>
35 #include "internal.h"
36
37 int hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43  * Minimum page order among possible hugepage sizes, set to a proper value
44  * at boot time.
45  */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48 __initdata LIST_HEAD(huge_boot_pages);
49
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54
55 /*
56  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
57  * free_huge_pages, and surplus_huge_pages.
58  */
59 DEFINE_SPINLOCK(hugetlb_lock);
60
61 /*
62  * Serializes faults on the same logical page.  This is used to
63  * prevent spurious OOMs when the hugepage pool is fully utilized.
64  */
65 static int num_fault_mutexes;
66 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
67
68 /* Forward declaration */
69 static int hugetlb_acct_memory(struct hstate *h, long delta);
70
71 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
72 {
73         bool free = (spool->count == 0) && (spool->used_hpages == 0);
74
75         spin_unlock(&spool->lock);
76
77         /* If no pages are used, and no other handles to the subpool
78          * remain, give up any reservations based on minimum size and
79          * free the subpool */
80         if (free) {
81                 if (spool->min_hpages != -1)
82                         hugetlb_acct_memory(spool->hstate,
83                                                 -spool->min_hpages);
84                 kfree(spool);
85         }
86 }
87
88 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
89                                                 long min_hpages)
90 {
91         struct hugepage_subpool *spool;
92
93         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
94         if (!spool)
95                 return NULL;
96
97         spin_lock_init(&spool->lock);
98         spool->count = 1;
99         spool->max_hpages = max_hpages;
100         spool->hstate = h;
101         spool->min_hpages = min_hpages;
102
103         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
104                 kfree(spool);
105                 return NULL;
106         }
107         spool->rsv_hpages = min_hpages;
108
109         return spool;
110 }
111
112 void hugepage_put_subpool(struct hugepage_subpool *spool)
113 {
114         spin_lock(&spool->lock);
115         BUG_ON(!spool->count);
116         spool->count--;
117         unlock_or_release_subpool(spool);
118 }
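
/*
 * Illustrative lifecycle sketch (hypothetical values, not taken from a
 * caller in this file): hugetlbfs keeps one subpool per superblock and
 * drops it at unmount time.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, 512, 16);	// max 512, min 16 pages
 *	if (!spool)
 *		return -ENOMEM;		// minimum reserve could not be taken
 *	...
 *	hugepage_put_subpool(spool);	// drop ref; freed once unused
 */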
119
120 /*
121  * Subpool accounting for allocating and reserving pages.
122  * Return -ENOMEM if there are not enough resources to satisfy
123  * the request.  Otherwise, return the number of pages by which the
124  * global pools must be adjusted (upward).  The returned value may
125  * only differ from the passed value (delta) in the case where
126  * a subpool minimum size must be maintained.
127  */
128 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
129                                       long delta)
130 {
131         long ret = delta;
132
133         if (!spool)
134                 return ret;
135
136         spin_lock(&spool->lock);
137
138         if (spool->max_hpages != -1) {          /* maximum size accounting */
139                 if ((spool->used_hpages + delta) <= spool->max_hpages)
140                         spool->used_hpages += delta;
141                 else {
142                         ret = -ENOMEM;
143                         goto unlock_ret;
144                 }
145         }
146
147         if (spool->min_hpages != -1) {          /* minimum size accounting */
148                 if (delta > spool->rsv_hpages) {
149                         /*
150                          * Asking for more reserves than those already taken on
151                          * behalf of subpool.  Return difference.
152                          */
153                         ret = delta - spool->rsv_hpages;
154                         spool->rsv_hpages = 0;
155                 } else {
156                         ret = 0;        /* reserves already accounted for */
157                         spool->rsv_hpages -= delta;
158                 }
159         }
160
161 unlock_ret:
162         spin_unlock(&spool->lock);
163         return ret;
164 }
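
/*
 * Worked example of the minimum size accounting above (hypothetical
 * numbers): a subpool created with min_hpages = 16 starts with
 * rsv_hpages = 16.
 *
 *	hugepage_subpool_get_pages(spool, 4);	// returns 0, rsv_hpages = 12
 *	hugepage_subpool_get_pages(spool, 20);	// returns 8, rsv_hpages = 0
 *
 * The first request is covered entirely by the subpool's reserve, so the
 * global pools need no adjustment; the second exceeds the remaining
 * reserve by 8 pages, which the caller must charge to the global pools.
 */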
165
166 /*
167  * Subpool accounting for freeing and unreserving pages.
168  * Return the number of global page reservations that must be dropped.
169  * The return value may only differ from the passed value (delta)
170  * in the case where a subpool minimum size must be maintained.
171  */
172 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
173                                        long delta)
174 {
175         long ret = delta;
176
177         if (!spool)
178                 return delta;
179
180         spin_lock(&spool->lock);
181
182         if (spool->max_hpages != -1)            /* maximum size accounting */
183                 spool->used_hpages -= delta;
184
185         if (spool->min_hpages != -1) {          /* minimum size accounting */
186                 if (spool->rsv_hpages + delta <= spool->min_hpages)
187                         ret = 0;
188                 else
189                         ret = spool->rsv_hpages + delta - spool->min_hpages;
190
191                 spool->rsv_hpages += delta;
192                 if (spool->rsv_hpages > spool->min_hpages)
193                         spool->rsv_hpages = spool->min_hpages;
194         }
195
196         /*
197          * If hugetlbfs_put_super couldn't free spool due to an outstanding
198          * quota reference, free it now.
199          */
200         unlock_or_release_subpool(spool);
201
202         return ret;
203 }
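
/*
 * Worked example, continuing the numbers above: with min_hpages = 16 and
 * rsv_hpages = 0, returning pages refills the subpool reserve before any
 * global reservations may be dropped.
 *
 *	hugepage_subpool_put_pages(spool, 10);	// returns 0, rsv_hpages = 10
 *	hugepage_subpool_put_pages(spool, 10);	// returns 4, rsv_hpages = 16
 *
 * The second call needs only 6 pages to top the reserve back up to the
 * 16-page minimum, so 4 global reservations can be dropped.
 */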
204
205 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
206 {
207         return HUGETLBFS_SB(inode->i_sb)->spool;
208 }
209
210 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
211 {
212         return subpool_inode(file_inode(vma->vm_file));
213 }
214
215 /*
216  * Region tracking -- allows tracking of reservations and instantiated pages
217  *                    across the pages in a mapping.
218  *
219  * The region data structures are embedded into a resv_map and protected
220  * by a resv_map's lock.  The set of regions within the resv_map represent
221  * reservations for huge pages, or huge pages that have already been
222  * instantiated within the map.  The from and to elements are huge page
223  * indices into the associated mapping.  from indicates the starting index
224  * of the region.  to represents the first index past the end of the region.
225  *
226  * For example, a file region structure with from == 0 and to == 4 represents
227  * four huge pages in a mapping.  It is important to note that the to element
228  * represents the first element past the end of the region. This is used in
229  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
230  *
231  * Interval notation of the form [from, to) will be used to indicate that
232  * the endpoint from is inclusive and to is exclusive.
233  */
234 struct file_region {
235         struct list_head link;
236         long from;
237         long to;
238 };
239
240 /*
241  * Add the huge page range represented by [f, t) to the reserve
242  * map.  In the normal case, existing regions will be expanded
243  * to accommodate the specified range.  Sufficient regions should
244  * exist for expansion due to the previous call to region_chg
245  * with the same range.  However, it is possible that region_del
246  * could have been called after region_chg and modified the map
247  * in such a way that no region exists to be expanded.  In this
248  * case, pull a region descriptor from the cache associated with
249  * the map and use that for the new range.
250  *
251  * Return the number of new huge pages added to the map.  This
252  * number is greater than or equal to zero.
253  */
254 static long region_add(struct resv_map *resv, long f, long t)
255 {
256         struct list_head *head = &resv->regions;
257         struct file_region *rg, *nrg, *trg;
258         long add = 0;
259
260         spin_lock(&resv->lock);
261         /* Locate the region we are either in or before. */
262         list_for_each_entry(rg, head, link)
263                 if (f <= rg->to)
264                         break;
265
266         /*
267          * If no region exists which can be expanded to include the
268          * specified range, the list must have been modified by an
269  * interleaving call to region_del().  Pull a region descriptor
270          * from the cache and use it for this range.
271          */
272         if (&rg->link == head || t < rg->from) {
273                 VM_BUG_ON(resv->region_cache_count <= 0);
274
275                 resv->region_cache_count--;
276                 nrg = list_first_entry(&resv->region_cache, struct file_region,
277                                         link);
278                 list_del(&nrg->link);
279
280                 nrg->from = f;
281                 nrg->to = t;
282                 list_add(&nrg->link, rg->link.prev);
283
284                 add += t - f;
285                 goto out_locked;
286         }
287
288         /* Round our left edge to the current segment if it encloses us. */
289         if (f > rg->from)
290                 f = rg->from;
291
292         /* Check for and consume any regions we now overlap with. */
293         nrg = rg;
294         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
295                 if (&rg->link == head)
296                         break;
297                 if (rg->from > t)
298                         break;
299
300                 /* If this area reaches higher, extend our area to
301                  * include it completely.  If this is not the first area
302                  * which we intend to reuse, free it. */
303                 if (rg->to > t)
304                         t = rg->to;
305                 if (rg != nrg) {
306                         /* Decrement return value by the deleted range.
307                          * Another range will span this area so that by
308                          * end of routine add will be >= zero
309                          */
310                         add -= (rg->to - rg->from);
311                         list_del(&rg->link);
312                         kfree(rg);
313                 }
314         }
315
316         add += (nrg->from - f);         /* Added to beginning of region */
317         nrg->from = f;
318         add += t - nrg->to;             /* Added to end of region */
319         nrg->to = t;
320
321 out_locked:
322         resv->adds_in_progress--;
323         spin_unlock(&resv->lock);
324         VM_BUG_ON(add < 0);
325         return add;
326 }
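
/*
 * Worked example (hypothetical map): with existing regions [0, 2) and
 * [4, 6), region_add(resv, 1, 5) expands the first region, absorbs the
 * second, and leaves the single region [0, 6).  It returns 2, since the
 * pages at indices 2 and 3 are the only ones newly represented.
 */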
327
328 /*
329  * Examine the existing reserve map and determine how many
330  * huge pages in the specified range [f, t) are NOT currently
331  * represented.  This routine is called before a subsequent
332  * call to region_add that will actually modify the reserve
333  * map to add the specified range [f, t).  region_chg does
334  * not change the number of huge pages represented by the
335  * map.  However, if the existing regions in the map can not
336  * be expanded to represent the new range, a new file_region
337  * structure is added to the map as a placeholder.  This is
338  * so that the subsequent region_add call will have all the
339  * regions it needs and will not fail.
340  *
341  * Upon entry, region_chg will also examine the cache of region descriptors
342  * associated with the map.  If there are not enough descriptors cached, one
343  * will be allocated for the in progress add operation.
344  *
345  * Returns the number of huge pages that need to be added to the existing
346  * reservation map for the range [f, t).  This number is greater than or
347  * equal to zero.  -ENOMEM is returned if a new file_region structure or
348  * cache entry is needed and can not be allocated.
349  */
350 static long region_chg(struct resv_map *resv, long f, long t)
351 {
352         struct list_head *head = &resv->regions;
353         struct file_region *rg, *nrg = NULL;
354         long chg = 0;
355
356 retry:
357         spin_lock(&resv->lock);
358 retry_locked:
359         resv->adds_in_progress++;
360
361         /*
362          * Check for sufficient descriptors in the cache to accommodate
363          * the number of in progress add operations.
364          */
365         if (resv->adds_in_progress > resv->region_cache_count) {
366                 struct file_region *trg;
367
368                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
369                 /* Must drop lock to allocate a new descriptor. */
370                 resv->adds_in_progress--;
371                 spin_unlock(&resv->lock);
372
373                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
374                 if (!trg) {
375                         kfree(nrg);
376                         return -ENOMEM;
377                 }
378
379                 spin_lock(&resv->lock);
380                 list_add(&trg->link, &resv->region_cache);
381                 resv->region_cache_count++;
382                 goto retry_locked;
383         }
384
385         /* Locate the region we are before or in. */
386         list_for_each_entry(rg, head, link)
387                 if (f <= rg->to)
388                         break;
389
390         /* If we are below the current region then a new region is required.
391          * Subtle: allocate a new region at the position but make it zero
392          * size such that we can guarantee to record the reservation. */
393         if (&rg->link == head || t < rg->from) {
394                 if (!nrg) {
395                         resv->adds_in_progress--;
396                         spin_unlock(&resv->lock);
397                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
398                         if (!nrg)
399                                 return -ENOMEM;
400
401                         nrg->from = f;
402                         nrg->to   = f;
403                         INIT_LIST_HEAD(&nrg->link);
404                         goto retry;
405                 }
406
407                 list_add(&nrg->link, rg->link.prev);
408                 chg = t - f;
409                 goto out_nrg;
410         }
411
412         /* Round our left edge to the current segment if it encloses us. */
413         if (f > rg->from)
414                 f = rg->from;
415         chg = t - f;
416
417         /* Check for and consume any regions we now overlap with. */
418         list_for_each_entry(rg, rg->link.prev, link) {
419                 if (&rg->link == head)
420                         break;
421                 if (rg->from > t)
422                         goto out;
423
424                 /* We overlap with this area, if it extends further than
425                  * us then we must extend ourselves.  Account for its
426                  * existing reservation. */
427                 if (rg->to > t) {
428                         chg += rg->to - t;
429                         t = rg->to;
430                 }
431                 chg -= rg->to - rg->from;
432         }
433
434 out:
435         spin_unlock(&resv->lock);
436         /*  We already know we raced and no longer need the new region */
437         kfree(nrg);
438         return chg;
439 out_nrg:
440         spin_unlock(&resv->lock);
441         return chg;
442 }
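
/*
 * Worked example (hypothetical map), matching the region_add() example
 * above: with existing regions [0, 2) and [4, 6), region_chg(resv, 1, 5)
 * returns 2 without modifying the map, since only indices 2 and 3 of
 * [1, 5) are not yet represented.  A subsequent region_add(resv, 1, 5)
 * then adds exactly those 2 pages.
 */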
443
444 /*
445  * Abort the in progress add operation.  The adds_in_progress field
446  * of the resv_map keeps track of the operations in progress between
447  * calls to region_chg and region_add.  Operations are sometimes
448  * aborted after the call to region_chg.  In such cases, region_abort
449  * is called to decrement the adds_in_progress counter.
450  *
451  * NOTE: The range arguments [f, t) are not needed or used in this
452  * routine.  They are kept to make reading the calling code easier as
453  * arguments will match the associated region_chg call.
454  */
455 static void region_abort(struct resv_map *resv, long f, long t)
456 {
457         spin_lock(&resv->lock);
458         VM_BUG_ON(!resv->region_cache_count);
459         resv->adds_in_progress--;
460         spin_unlock(&resv->lock);
461 }
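
/*
 * Illustrative call sequence (a sketch, not a verbatim caller from this
 * file) showing how region_chg, region_add and region_abort pair up:
 *
 *	chg = region_chg(resv, f, t);	// count needed pages, cache descriptor
 *	if (chg < 0)
 *		return chg;
 *	// ... attempt the huge page allocation/charge ...
 *	if (failed) {
 *		region_abort(resv, f, t);	// drop the in-progress add
 *		return -ENOMEM;
 *	}
 *	region_add(resv, f, t);		// commit the range to the map
 */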
462
463 /*
464  * Delete the specified range [f, t) from the reserve map.  If the
465  * t parameter is LONG_MAX, this indicates that ALL regions after f
466  * should be deleted.  Locate the regions which intersect [f, t)
467  * and either trim, delete or split the existing regions.
468  *
469  * Returns the number of huge pages deleted from the reserve map.
470  * In the normal case, the return value is zero or more.  In the
471  * case where a region must be split, a new region descriptor must
472  * be allocated.  If the allocation fails, -ENOMEM will be returned.
473  * NOTE: If the parameter t == LONG_MAX, then we will never split
474  * a region and so will never return -ENOMEM.  Callers specifying
475  * t == LONG_MAX do not need to check for -ENOMEM error.
476  */
477 static long region_del(struct resv_map *resv, long f, long t)
478 {
479         struct list_head *head = &resv->regions;
480         struct file_region *rg, *trg;
481         struct file_region *nrg = NULL;
482         long del = 0;
483
484 retry:
485         spin_lock(&resv->lock);
486         list_for_each_entry_safe(rg, trg, head, link) {
487                 /*
488                  * Skip regions before the range to be deleted.  file_region
489                  * ranges are normally of the form [from, to).  However, there
490                  * may be a "placeholder" entry in the map which is of the form
491                  * (from, to) with from == to.  Check for placeholder entries
492                  * at the beginning of the range to be deleted.
493                  */
494                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
495                         continue;
496
497                 if (rg->from >= t)
498                         break;
499
500                 if (f > rg->from && t < rg->to) { /* Must split region */
501                         /*
502                          * Check for an entry in the cache before dropping
503                          * lock and attempting allocation.
504                          */
505                         if (!nrg &&
506                             resv->region_cache_count > resv->adds_in_progress) {
507                                 nrg = list_first_entry(&resv->region_cache,
508                                                         struct file_region,
509                                                         link);
510                                 list_del(&nrg->link);
511                                 resv->region_cache_count--;
512                         }
513
514                         if (!nrg) {
515                                 spin_unlock(&resv->lock);
516                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
517                                 if (!nrg)
518                                         return -ENOMEM;
519                                 goto retry;
520                         }
521
522                         del += t - f;
523
524                         /* New entry for end of split region */
525                         nrg->from = t;
526                         nrg->to = rg->to;
527                         INIT_LIST_HEAD(&nrg->link);
528
529                         /* Original entry is trimmed */
530                         rg->to = f;
531
532                         list_add(&nrg->link, &rg->link);
533                         nrg = NULL;
534                         break;
535                 }
536
537                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
538                         del += rg->to - rg->from;
539                         list_del(&rg->link);
540                         kfree(rg);
541                         continue;
542                 }
543
544                 if (f <= rg->from) {    /* Trim beginning of region */
545                         del += t - rg->from;
546                         rg->from = t;
547                 } else {                /* Trim end of region */
548                         del += rg->to - f;
549                         rg->to = f;
550                 }
551         }
552
553         spin_unlock(&resv->lock);
554         kfree(nrg);
555         return del;
556 }
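
/*
 * Worked example (hypothetical map): with a single region [0, 6),
 * region_del(resv, 2, 4) must split it, leaving [0, 2) and [4, 6) and
 * returning 2.  A call with t == LONG_MAX, such as
 * region_del(resv, 0, LONG_MAX), only trims or removes whole regions and
 * so never takes the split path (hence never returns -ENOMEM).
 */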
557
558 /*
559  * A rare out of memory error was encountered which prevented removal of
560  * the reserve map region for a page.  The huge page itself was freed
561  * and removed from the page cache.  This routine will adjust the subpool
562  * usage count, and the global reserve count if needed.  By incrementing
563  * these counts, the reserve map entry which could not be deleted will
564  * appear as a "reserved" entry instead of simply dangling with incorrect
565  * counts.
566  */
567 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
568 {
569         struct hugepage_subpool *spool = subpool_inode(inode);
570         long rsv_adjust;
571
572         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
573         if (restore_reserve && rsv_adjust) {
574                 struct hstate *h = hstate_inode(inode);
575
576                 hugetlb_acct_memory(h, 1);
577         }
578 }
579
580 /*
581  * Count and return the number of huge pages in the reserve map
582  * that intersect with the range [f, t).
583  */
584 static long region_count(struct resv_map *resv, long f, long t)
585 {
586         struct list_head *head = &resv->regions;
587         struct file_region *rg;
588         long chg = 0;
589
590         spin_lock(&resv->lock);
591         /* Locate each segment we overlap with, and count that overlap. */
592         list_for_each_entry(rg, head, link) {
593                 long seg_from;
594                 long seg_to;
595
596                 if (rg->to <= f)
597                         continue;
598                 if (rg->from >= t)
599                         break;
600
601                 seg_from = max(rg->from, f);
602                 seg_to = min(rg->to, t);
603
604                 chg += seg_to - seg_from;
605         }
606         spin_unlock(&resv->lock);
607
608         return chg;
609 }
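
/*
 * Worked example (hypothetical map): with regions [0, 2) and [4, 6),
 * region_count(resv, 1, 5) sums the overlaps [1, 2) and [4, 5) and
 * returns 2.
 */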
610
611 /*
612  * Convert the address within this vma to the page offset within
613  * the mapping, in pagecache page units; huge pages here.
614  */
615 static pgoff_t vma_hugecache_offset(struct hstate *h,
616                         struct vm_area_struct *vma, unsigned long address)
617 {
618         return ((address - vma->vm_start) >> huge_page_shift(h)) +
619                         (vma->vm_pgoff >> huge_page_order(h));
620 }
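
/*
 * Worked example (assuming 2MB huge pages on 4KB base pages, i.e.
 * huge_page_shift == 21 and huge_page_order == 9): for a VMA with
 * vm_pgoff == 1024 (4MB into the file, in base-page units), an address
 * 4MB past vm_start yields (4MB >> 21) + (1024 >> 9) == 2 + 2 == 4,
 * the fifth huge page of the file.
 */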
621
622 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
623                                      unsigned long address)
624 {
625         return vma_hugecache_offset(hstate_vma(vma), vma, address);
626 }
627
628 /*
629  * Return the size of the pages allocated when backing a VMA. In the majority
630  * of cases this will be the same size as that used by the page table entries.
631  */
632 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
633 {
634         struct hstate *hstate;
635
636         if (!is_vm_hugetlb_page(vma))
637                 return PAGE_SIZE;
638
639         hstate = hstate_vma(vma);
640
641         return 1UL << huge_page_shift(hstate);
642 }
643 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
644
645 /*
646  * Return the page size being used by the MMU to back a VMA. In the majority
647  * of cases, the page size used by the kernel matches the MMU size. On
648  * architectures where it differs, an architecture-specific version of this
649  * function is required.
650  */
651 #ifndef vma_mmu_pagesize
652 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
653 {
654         return vma_kernel_pagesize(vma);
655 }
656 #endif
657
658 /*
659  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
660  * bits of the reservation map pointer, which are always clear due to
661  * alignment.
662  */
663 #define HPAGE_RESV_OWNER    (1UL << 0)
664 #define HPAGE_RESV_UNMAPPED (1UL << 1)
665 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
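
/*
 * Because a resv_map is allocated with kmalloc(), its address has at
 * least pointer alignment, so the two low bits covered by
 * HPAGE_RESV_MASK are guaranteed clear.  For example, a private mapping
 * that owns its reserves stores (map | HPAGE_RESV_OWNER) in
 * vm_private_data; the helpers below recover the pointer and the flags
 * independently by masking.
 */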
666
667 /*
668  * These helpers are used to track how many pages are reserved for
669  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
670  * is guaranteed to have their future faults succeed.
671  *
672  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
673  * the reserve counters are updated with the hugetlb_lock held. It is safe
674  * to reset the VMA at fork() time as it is not in use yet and there is no
675  * chance of the global counters getting corrupted as a result of the values.
676  *
677  * The private mapping reservation is represented in a subtly different
678  * manner to a shared mapping.  A shared mapping has a region map associated
679  * with the underlying file; this region map represents the backing file
680  * pages which have ever had a reservation assigned, and it persists even
681  * after the page is instantiated.  A private mapping has a region map
682  * associated with the original mmap which is attached to all VMAs which
683  * reference it; this region map represents those offsets which have
684  * consumed reservations, i.e. where pages have been instantiated.
685  */
686 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
687 {
688         return (unsigned long)vma->vm_private_data;
689 }
690
691 static void set_vma_private_data(struct vm_area_struct *vma,
692                                                         unsigned long value)
693 {
694         vma->vm_private_data = (void *)value;
695 }
696
697 struct resv_map *resv_map_alloc(void)
698 {
699         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
700         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
701
702         if (!resv_map || !rg) {
703                 kfree(resv_map);
704                 kfree(rg);
705                 return NULL;
706         }
707
708         kref_init(&resv_map->refs);
709         spin_lock_init(&resv_map->lock);
710         INIT_LIST_HEAD(&resv_map->regions);
711
712         resv_map->adds_in_progress = 0;
713
714         INIT_LIST_HEAD(&resv_map->region_cache);
715         list_add(&rg->link, &resv_map->region_cache);
716         resv_map->region_cache_count = 1;
717
718         return resv_map;
719 }
720
721 void resv_map_release(struct kref *ref)
722 {
723         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
724         struct list_head *head = &resv_map->region_cache;
725         struct file_region *rg, *trg;
726
727         /* Clear out any active regions before we release the map. */
728         region_del(resv_map, 0, LONG_MAX);
729
730         /* ... and any entries left in the cache */
731         list_for_each_entry_safe(rg, trg, head, link) {
732                 list_del(&rg->link);
733                 kfree(rg);
734         }
735
736         VM_BUG_ON(resv_map->adds_in_progress);
737
738         kfree(resv_map);
739 }
740
741 static inline struct resv_map *inode_resv_map(struct inode *inode)
742 {
743         return inode->i_mapping->private_data;
744 }
745
746 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
747 {
748         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
749         if (vma->vm_flags & VM_MAYSHARE) {
750                 struct address_space *mapping = vma->vm_file->f_mapping;
751                 struct inode *inode = mapping->host;
752
753                 return inode_resv_map(inode);
754
755         } else {
756                 return (struct resv_map *)(get_vma_private_data(vma) &
757                                                         ~HPAGE_RESV_MASK);
758         }
759 }
760
761 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
762 {
763         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
765
766         set_vma_private_data(vma, (get_vma_private_data(vma) &
767                                 HPAGE_RESV_MASK) | (unsigned long)map);
768 }
769
770 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
771 {
772         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774
775         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
776 }
777
778 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
779 {
780         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
781
782         return (get_vma_private_data(vma) & flag) != 0;
783 }
784
785 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
786 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
787 {
788         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789         if (!(vma->vm_flags & VM_MAYSHARE))
790                 vma->vm_private_data = (void *)0;
791 }
792
793 /* Returns true if the VMA has associated reserve pages */
794 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
795 {
796         if (vma->vm_flags & VM_NORESERVE) {
797                 /*
798                  * This address is already reserved by another process (chg == 0),
799                  * so we should decrement the reserved count.  Without decrementing,
800                  * the reserve count remains after releasing the inode, because the
801                  * allocated page will go into the page cache and be regarded as
802                  * coming from the reserved pool in the releasing step.  Currently,
803                  * we don't have any other solution to deal with this situation
804                  * properly, so add a work-around here.
805                  */
806                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
807                         return true;
808                 else
809                         return false;
810         }
811
812         /* Shared mappings always use reserves */
813         if (vma->vm_flags & VM_MAYSHARE) {
814                 /*
815                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
816                  * be a region map for all pages.  The only situation where
817                  * there is no region map is if a hole was punched via
818  * fallocate.  In this case, there really are no reserves to
819                  * use.  This situation is indicated if chg != 0.
820                  */
821                 if (chg)
822                         return false;
823                 else
824                         return true;
825         }
826
827         /*
828          * Only the process that called mmap() has reserves for
829          * private mappings.
830          */
831         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
832                 return true;
833
834         return false;
835 }
836
837 static void enqueue_huge_page(struct hstate *h, struct page *page)
838 {
839         int nid = page_to_nid(page);
840         list_move(&page->lru, &h->hugepage_freelists[nid]);
841         h->free_huge_pages++;
842         h->free_huge_pages_node[nid]++;
843 }
844
845 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
846 {
847         struct page *page;
848
849         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
850                 if (!is_migrate_isolate_page(page))
851                         break;
852         /*
853          * if 'non-isolated free hugepage' not found on the list,
854          * the allocation fails.
855          */
856         if (&h->hugepage_freelists[nid] == &page->lru)
857                 return NULL;
858         list_move(&page->lru, &h->hugepage_activelist);
859         set_page_refcounted(page);
860         h->free_huge_pages--;
861         h->free_huge_pages_node[nid]--;
862         return page;
863 }
864
865 /* Movability of hugepages depends on migration support. */
866 static inline gfp_t htlb_alloc_mask(struct hstate *h)
867 {
868         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
869                 return GFP_HIGHUSER_MOVABLE;
870         else
871                 return GFP_HIGHUSER;
872 }
873
874 static struct page *dequeue_huge_page_vma(struct hstate *h,
875                                 struct vm_area_struct *vma,
876                                 unsigned long address, int avoid_reserve,
877                                 long chg)
878 {
879         struct page *page = NULL;
880         struct mempolicy *mpol;
881         nodemask_t *nodemask;
882         struct zonelist *zonelist;
883         struct zone *zone;
884         struct zoneref *z;
885         unsigned int cpuset_mems_cookie;
886
887         /*
888          * A child process with MAP_PRIVATE mappings created by its parent
889          * has no page reserves. This check ensures that reservations are
890          * not "stolen". The child may still get SIGKILLed.
891          */
892         if (!vma_has_reserves(vma, chg) &&
893                         h->free_huge_pages - h->resv_huge_pages == 0)
894                 goto err;
895
896         /* If reserves cannot be used, ensure enough pages are in the pool */
897         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
898                 goto err;
899
900 retry_cpuset:
901         cpuset_mems_cookie = read_mems_allowed_begin();
902         zonelist = huge_zonelist(vma, address,
903                                         htlb_alloc_mask(h), &mpol, &nodemask);
904
905         for_each_zone_zonelist_nodemask(zone, z, zonelist,
906                                                 MAX_NR_ZONES - 1, nodemask) {
907                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
908                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
909                         if (page) {
910                                 if (avoid_reserve)
911                                         break;
912                                 if (!vma_has_reserves(vma, chg))
913                                         break;
914
915                                 SetPagePrivate(page);
916                                 h->resv_huge_pages--;
917                                 break;
918                         }
919                 }
920         }
921
922         mpol_cond_put(mpol);
923         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
924                 goto retry_cpuset;
925         return page;
926
927 err:
928         return NULL;
929 }
930
931 /*
932  * common helper functions for hstate_next_node_to_{alloc|free}.
933  * We may have allocated or freed a huge page based on a different
934  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
935  * be outside of *nodes_allowed.  Ensure that we use an allowed
936  * node for alloc or free.
937  */
938 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
939 {
940         nid = next_node(nid, *nodes_allowed);
941         if (nid == MAX_NUMNODES)
942                 nid = first_node(*nodes_allowed);
943         VM_BUG_ON(nid >= MAX_NUMNODES);
944
945         return nid;
946 }
947
948 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
949 {
950         if (!node_isset(nid, *nodes_allowed))
951                 nid = next_node_allowed(nid, nodes_allowed);
952         return nid;
953 }
954
955 /*
956  * returns the previously saved node ["this node"] from which to
957  * allocate a persistent huge page for the pool and advance the
958  * next node from which to allocate, handling wrap at end of node
959  * mask.
960  */
961 static int hstate_next_node_to_alloc(struct hstate *h,
962                                         nodemask_t *nodes_allowed)
963 {
964         int nid;
965
966         VM_BUG_ON(!nodes_allowed);
967
968         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
969         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
970
971         return nid;
972 }
973
974 /*
975  * helper for free_pool_huge_page() - return the previously saved
976  * node ["this node"] from which to free a huge page.  Advance the
977  * next node id whether or not we find a free huge page to free so
978  * that the next attempt to free addresses the next node.
979  */
980 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
981 {
982         int nid;
983
984         VM_BUG_ON(!nodes_allowed);
985
986         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
987         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
988
989         return nid;
990 }
991
992 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
993         for (nr_nodes = nodes_weight(*mask);                            \
994                 nr_nodes > 0 &&                                         \
995                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
996                 nr_nodes--)
997
998 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
999         for (nr_nodes = nodes_weight(*mask);                            \
1000                 nr_nodes > 0 &&                                         \
1001                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1002                 nr_nodes--)
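
/*
 * Illustrative expansion (hypothetical node mask): with
 * nodes_allowed == {0, 2} and h->next_nid_to_alloc == 1,
 * for_each_node_mask_to_alloc() visits node 2 and then node 0
 * (nr_nodes == 2), advancing h->next_nid_to_alloc past each node it
 * returns so that successive allocations round-robin over the allowed
 * nodes.
 */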
1003
1004 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
1005 static void destroy_compound_gigantic_page(struct page *page,
1006                                         unsigned int order)
1007 {
1008         int i;
1009         int nr_pages = 1 << order;
1010         struct page *p = page + 1;
1011
1012         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1013                 clear_compound_head(p);
1014                 set_page_refcounted(p);
1015         }
1016
1017         set_compound_order(page, 0);
1018         __ClearPageHead(page);
1019 }
1020
1021 static void free_gigantic_page(struct page *page, unsigned int order)
1022 {
1023         free_contig_range(page_to_pfn(page), 1 << order);
1024 }
1025
1026 static int __alloc_gigantic_page(unsigned long start_pfn,
1027                                 unsigned long nr_pages)
1028 {
1029         unsigned long end_pfn = start_pfn + nr_pages;
1030         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1031 }
1032
1033 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1034                                 unsigned long nr_pages)
1035 {
1036         unsigned long i, end_pfn = start_pfn + nr_pages;
1037         struct page *page;
1038
1039         for (i = start_pfn; i < end_pfn; i++) {
1040                 if (!pfn_valid(i))
1041                         return false;
1042
1043                 page = pfn_to_page(i);
1044
1045                 if (PageReserved(page))
1046                         return false;
1047
1048                 if (page_count(page) > 0)
1049                         return false;
1050
1051                 if (PageHuge(page))
1052                         return false;
1053         }
1054
1055         return true;
1056 }
1057
1058 static bool zone_spans_last_pfn(const struct zone *zone,
1059                         unsigned long start_pfn, unsigned long nr_pages)
1060 {
1061         unsigned long last_pfn = start_pfn + nr_pages - 1;
1062         return zone_spans_pfn(zone, last_pfn);
1063 }
1064
1065 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1066 {
1067         unsigned long nr_pages = 1 << order;
1068         unsigned long ret, pfn, flags;
1069         struct zone *z;
1070
1071         z = NODE_DATA(nid)->node_zones;
1072         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1073                 spin_lock_irqsave(&z->lock, flags);
1074
1075                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1076                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1077                         if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1078                                 /*
1079                                  * We release the zone lock here because
1080                                  * alloc_contig_range() will also lock the zone
1081                                  * at some point. If there's an allocation
1082                                  * spinning on this lock, it may win the race
1083                                  * and cause alloc_contig_range() to fail...
1084                                  */
1085                                 spin_unlock_irqrestore(&z->lock, flags);
1086                                 ret = __alloc_gigantic_page(pfn, nr_pages);
1087                                 if (!ret)
1088                                         return pfn_to_page(pfn);
1089                                 spin_lock_irqsave(&z->lock, flags);
1090                         }
1091                         pfn += nr_pages;
1092                 }
1093
1094                 spin_unlock_irqrestore(&z->lock, flags);
1095         }
1096
1097         return NULL;
1098 }
1099
1100 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1101 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1102
1103 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1104 {
1105         struct page *page;
1106
1107         page = alloc_gigantic_page(nid, huge_page_order(h));
1108         if (page) {
1109                 prep_compound_gigantic_page(page, huge_page_order(h));
1110                 prep_new_huge_page(h, page, nid);
1111         }
1112
1113         return page;
1114 }
1115
1116 static int alloc_fresh_gigantic_page(struct hstate *h,
1117                                 nodemask_t *nodes_allowed)
1118 {
1119         struct page *page = NULL;
1120         int nr_nodes, node;
1121
1122         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1123                 page = alloc_fresh_gigantic_page_node(h, node);
1124                 if (page)
1125                         return 1;
1126         }
1127
1128         return 0;
1129 }
1130
1131 static inline bool gigantic_page_supported(void) { return true; }
1132 #else
1133 static inline bool gigantic_page_supported(void) { return false; }
1134 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1135 static inline void destroy_compound_gigantic_page(struct page *page,
1136                                                 unsigned int order) { }
1137 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1138                                         nodemask_t *nodes_allowed) { return 0; }
1139 #endif
1140
1141 static void update_and_free_page(struct hstate *h, struct page *page)
1142 {
1143         int i;
1144
1145         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1146                 return;
1147
1148         h->nr_huge_pages--;
1149         h->nr_huge_pages_node[page_to_nid(page)]--;
1150         for (i = 0; i < pages_per_huge_page(h); i++) {
1151                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1152                                 1 << PG_referenced | 1 << PG_dirty |
1153                                 1 << PG_active | 1 << PG_private |
1154                                 1 << PG_writeback);
1155         }
1156         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1157         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1158         set_page_refcounted(page);
1159         if (hstate_is_gigantic(h)) {
1160                 destroy_compound_gigantic_page(page, huge_page_order(h));
1161                 free_gigantic_page(page, huge_page_order(h));
1162         } else {
1163                 __free_pages(page, huge_page_order(h));
1164         }
1165 }
1166
1167 struct hstate *size_to_hstate(unsigned long size)
1168 {
1169         struct hstate *h;
1170
1171         for_each_hstate(h) {
1172                 if (huge_page_size(h) == size)
1173                         return h;
1174         }
1175         return NULL;
1176 }
1177
1178 /*
1179  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1180  * to hstate->hugepage_activelist.)
1181  *
1182  * This function can be called for tail pages, but never returns true for them.
1183  */
1184 bool page_huge_active(struct page *page)
1185 {
1186         VM_BUG_ON_PAGE(!PageHuge(page), page);
1187         return PageHead(page) && PagePrivate(&page[1]);
1188 }
1189
1190 /* never called for tail page */
1191 static void set_page_huge_active(struct page *page)
1192 {
1193         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1194         SetPagePrivate(&page[1]);
1195 }
1196
1197 static void clear_page_huge_active(struct page *page)
1198 {
1199         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1200         ClearPagePrivate(&page[1]);
1201 }
1202
1203 void free_huge_page(struct page *page)
1204 {
1205         /*
1206          * Can't pass hstate in here because it is called from the
1207          * compound page destructor.
1208          */
1209         struct hstate *h = page_hstate(page);
1210         int nid = page_to_nid(page);
1211         struct hugepage_subpool *spool =
1212                 (struct hugepage_subpool *)page_private(page);
1213         bool restore_reserve;
1214
1215         set_page_private(page, 0);
1216         page->mapping = NULL;
1217         BUG_ON(page_count(page));
1218         BUG_ON(page_mapcount(page));
1219         restore_reserve = PagePrivate(page);
1220         ClearPagePrivate(page);
1221
1222         /*
1223          * A return code of zero implies that the subpool will be under its
1224          * minimum size if the reservation is not restored after the page is freed.
1225          * Therefore, force restore_reserve operation.
1226          */
1227         if (hugepage_subpool_put_pages(spool, 1) == 0)
1228                 restore_reserve = true;
1229
1230         spin_lock(&hugetlb_lock);
1231         clear_page_huge_active(page);
1232         hugetlb_cgroup_uncharge_page(hstate_index(h),
1233                                      pages_per_huge_page(h), page);
1234         if (restore_reserve)
1235                 h->resv_huge_pages++;
1236
1237         if (h->surplus_huge_pages_node[nid]) {
1238                 /* remove the page from active list */
1239                 list_del(&page->lru);
1240                 update_and_free_page(h, page);
1241                 h->surplus_huge_pages--;
1242                 h->surplus_huge_pages_node[nid]--;
1243         } else {
1244                 arch_clear_hugepage_flags(page);
1245                 enqueue_huge_page(h, page);
1246         }
1247         spin_unlock(&hugetlb_lock);
1248 }
1249
1250 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1251 {
1252         INIT_LIST_HEAD(&page->lru);
1253         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1254         spin_lock(&hugetlb_lock);
1255         set_hugetlb_cgroup(page, NULL);
1256         h->nr_huge_pages++;
1257         h->nr_huge_pages_node[nid]++;
1258         spin_unlock(&hugetlb_lock);
1259         put_page(page); /* free it into the hugepage allocator */
1260 }
1261
1262 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1263 {
1264         int i;
1265         int nr_pages = 1 << order;
1266         struct page *p = page + 1;
1267
1268         /* we rely on prep_new_huge_page to set the destructor */
1269         set_compound_order(page, order);
1270         __ClearPageReserved(page);
1271         __SetPageHead(page);
1272         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1273                 /*
1274                  * For gigantic hugepages allocated through bootmem at
1275                  * boot, it's safer to be consistent with the not-gigantic
1276                  * hugepages and clear the PG_reserved bit from all tail pages
1277                  * too.  Otherwise drivers using get_user_pages() to access tail
1278                  * pages may get the reference counting wrong if they see
1279                  * PG_reserved set on a tail page (despite the head page not
1280                  * having PG_reserved set).  Enforcing this consistency between
1281                  * head and tail pages allows drivers to optimize away a check
1282                  * on the head page when they need to know if put_page() is needed
1283                  * after get_user_pages().
1284                  */
1285                 __ClearPageReserved(p);
1286                 set_page_count(p, 0);
1287                 set_compound_head(p, page);
1288         }
1289 }
1290
1291 /*
1292  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1293  * transparent huge pages.  See the PageTransHuge() documentation for more
1294  * details.
1295  */
1296 int PageHuge(struct page *page)
1297 {
1298         if (!PageCompound(page))
1299                 return 0;
1300
1301         page = compound_head(page);
1302         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1303 }
1304 EXPORT_SYMBOL_GPL(PageHuge);
1305
1306 /*
1307  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1308  * normal or transparent huge pages.
1309  */
1310 int PageHeadHuge(struct page *page_head)
1311 {
1312         if (!PageHead(page_head))
1313                 return 0;
1314
1315         return get_compound_page_dtor(page_head) == free_huge_page;
1316 }
1317
1318 pgoff_t __basepage_index(struct page *page)
1319 {
1320         struct page *page_head = compound_head(page);
1321         pgoff_t index = page_index(page_head);
1322         unsigned long compound_idx;
1323
1324         if (!PageHuge(page_head))
1325                 return page_index(page);
1326
1327         if (compound_order(page_head) >= MAX_ORDER)
1328                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1329         else
1330                 compound_idx = page - page_head;
1331
1332         return (index << compound_order(page_head)) + compound_idx;
1333 }
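
/*
 * Worked example (assuming 2MB huge pages on 4KB base pages, so
 * compound_order == 9, below MAX_ORDER): for the tail page 5 base pages
 * into the huge page at huge-page index 3 of its file,
 * __basepage_index() returns (3 << 9) + 5 == 1541 in base-page units.
 */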
1334
1335 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1336 {
1337         struct page *page;
1338
1339         page = __alloc_pages_node(nid,
1340                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1341                                                 __GFP_REPEAT|__GFP_NOWARN,
1342                 huge_page_order(h));
1343         if (page) {
1344                 prep_new_huge_page(h, page, nid);
1345         }
1346
1347         return page;
1348 }
1349
1350 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1351 {
1352         struct page *page;
1353         int nr_nodes, node;
1354         int ret = 0;
1355
1356         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1357                 page = alloc_fresh_huge_page_node(h, node);
1358                 if (page) {
1359                         ret = 1;
1360                         break;
1361                 }
1362         }
1363
1364         if (ret)
1365                 count_vm_event(HTLB_BUDDY_PGALLOC);
1366         else
1367                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1368
1369         return ret;
1370 }
1371
1372 /*
1373  * Free huge page from pool from next node to free.
1374  * Attempt to keep persistent huge pages more or less
1375  * balanced over allowed nodes.
1376  * Called with hugetlb_lock locked.
1377  */
1378 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1379                                                          bool acct_surplus)
1380 {
1381         int nr_nodes, node;
1382         int ret = 0;
1383
1384         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1385                 /*
1386                  * If we're returning unused surplus pages, only examine
1387                  * nodes with surplus pages.
1388                  */
1389                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1390                     !list_empty(&h->hugepage_freelists[node])) {
1391                         struct page *page =
1392                                 list_entry(h->hugepage_freelists[node].next,
1393                                           struct page, lru);
1394                         list_del(&page->lru);
1395                         h->free_huge_pages--;
1396                         h->free_huge_pages_node[node]--;
1397                         if (acct_surplus) {
1398                                 h->surplus_huge_pages--;
1399                                 h->surplus_huge_pages_node[node]--;
1400                         }
1401                         update_and_free_page(h, page);
1402                         ret = 1;
1403                         break;
1404                 }
1405         }
1406
1407         return ret;
1408 }
1409
1410 /*
1411  * Dissolve a given free hugepage into free buddy pages. This function does
1412  * nothing for in-use (including surplus) hugepages.
1413  */
1414 static void dissolve_free_huge_page(struct page *page)
1415 {
1416         spin_lock(&hugetlb_lock);
1417         if (PageHuge(page) && !page_count(page)) {
1418                 struct hstate *h = page_hstate(page);
1419                 int nid = page_to_nid(page);
1420                 list_del(&page->lru);
1421                 h->free_huge_pages--;
1422                 h->free_huge_pages_node[nid]--;
1423                 update_and_free_page(h, page);
1424         }
1425         spin_unlock(&hugetlb_lock);
1426 }
1427
1428 /*
1429  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1430  * make specified memory blocks removable from the system.
1431  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1432  */
1433 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1434 {
1435         unsigned long pfn;
1436
1437         if (!hugepages_supported())
1438                 return;
1439
1440         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1441         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1442                 dissolve_free_huge_page(pfn_to_page(pfn));
1443 }
1444
1445 /*
1446  * There are 3 ways this can get called:
1447  * 1. With vma+addr: we use the VMA's memory policy
1448  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1449  *    page from any node, and let the buddy allocator itself figure
1450  *    it out.
1451  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1452  *    strictly from 'nid'.
1453  */
1454 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1455                 struct vm_area_struct *vma, unsigned long addr, int nid)
1456 {
1457         int order = huge_page_order(h);
1458         gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1459         unsigned int cpuset_mems_cookie;
1460
1461         /*
1462          * We need a VMA to get a memory policy.  If we do not
1463          * have one, we use the 'nid' argument.
1464          *
1465          * The mempolicy stuff below has some non-inlined bits
1466          * and calls ->vm_ops.  That makes it hard to optimize at
1467          * compile-time, even when NUMA is off and it does
1468          * nothing.  This helps the compiler optimize it out.
1469          */
1470         if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1471                 /*
1472                  * If a specific node is requested, make sure to
1473                  * get memory from there, but only when a node
1474                  * is explicitly specified.
1475                  */
1476                 if (nid != NUMA_NO_NODE)
1477                         gfp |= __GFP_THISNODE;
1478                 /*
1479                  * Make sure to call something that can handle
1480                  * nid=NUMA_NO_NODE
1481                  */
1482                 return alloc_pages_node(nid, gfp, order);
1483         }
1484
1485         /*
1486          * OK, so we have a VMA.  Fetch the mempolicy and try to
1487          * allocate a huge page with it.  We will only reach this
1488          * when CONFIG_NUMA=y.
1489          */
1490         do {
1491                 struct page *page;
1492                 struct mempolicy *mpol;
1493                 struct zonelist *zl;
1494                 nodemask_t *nodemask;
1495
1496                 cpuset_mems_cookie = read_mems_allowed_begin();
1497                 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1498                 mpol_cond_put(mpol);
1499                 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1500                 if (page)
1501                         return page;
1502         } while (read_mems_allowed_retry(cpuset_mems_cookie));
1503
1504         return NULL;
1505 }
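/*
 * For illustration only, the three calling modes described above correspond
 * to calls of roughly this shape (the surrounding variables are assumed):
 *
 *	(1) __hugetlb_alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
 *	(2) __hugetlb_alloc_buddy_huge_page(h, NULL, -1, NUMA_NO_NODE);
 *	(3) __hugetlb_alloc_buddy_huge_page(h, NULL, -1, nid);
 *
 * (1) honours the VMA's mempolicy, (2) lets the buddy allocator pick any
 * node, and (3) adds __GFP_THISNODE so the page comes strictly from 'nid'.
 */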
1506
1507 /*
1508  * There are two ways to allocate a huge page:
1509  * 1. When you have a VMA and an address (like a fault)
1510  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1511  *
1512  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1513  * this case, which signifies that the allocation should be done with
1514  * respect to the VMA's memory policy.
1515  *
1516  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1517  * implies that memory policies will not be taken into account.
1518  */
1519 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1520                 struct vm_area_struct *vma, unsigned long addr, int nid)
1521 {
1522         struct page *page;
1523         unsigned int r_nid;
1524
1525         if (hstate_is_gigantic(h))
1526                 return NULL;
1527
1528         /*
1529          * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1530          * This makes sure the caller is picking _one_ of the modes with which
1531          * we can call this function, not both.
1532          */
1533         if (vma || (addr != -1)) {
1534                 VM_WARN_ON_ONCE(addr == -1);
1535                 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1536         }
1537         /*
1538          * Assume we will successfully allocate the surplus page to
1539          * prevent racing processes from causing the surplus to exceed
1540          * overcommit
1541          *
1542          * This however introduces a different race, where a process B
1543          * tries to grow the static hugepage pool while alloc_pages() is
1544          * called by process A. B will only examine the per-node
1545          * counters in determining if surplus huge pages can be
1546          * converted to normal huge pages in adjust_pool_surplus(). A
1547          * won't be able to increment the per-node counter, until the
1548          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1549          * no more huge pages can be converted from surplus to normal
1550          * state (and doesn't try to convert again). Thus, we have a
1551          * case where a surplus huge page exists, the pool is grown, and
1552          * the surplus huge page still exists after, even though it
1553          * should just have been converted to a normal huge page. This
1554          * does not leak memory, though, as the hugepage will be freed
1555          * once it is out of use. It also does not allow the counters to
1556          * go out of whack in adjust_pool_surplus() as we don't modify
1557          * the node values until we've gotten the hugepage and only the
1558          * per-node value is checked there.
1559          */
1560         spin_lock(&hugetlb_lock);
1561         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1562                 spin_unlock(&hugetlb_lock);
1563                 return NULL;
1564         } else {
1565                 h->nr_huge_pages++;
1566                 h->surplus_huge_pages++;
1567         }
1568         spin_unlock(&hugetlb_lock);
1569
1570         page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1571
1572         spin_lock(&hugetlb_lock);
1573         if (page) {
1574                 INIT_LIST_HEAD(&page->lru);
1575                 r_nid = page_to_nid(page);
1576                 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1577                 set_hugetlb_cgroup(page, NULL);
1578                 /*
1579                  * We incremented the global counters already
1580                  */
1581                 h->nr_huge_pages_node[r_nid]++;
1582                 h->surplus_huge_pages_node[r_nid]++;
1583                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1584         } else {
1585                 h->nr_huge_pages--;
1586                 h->surplus_huge_pages--;
1587                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1588         }
1589         spin_unlock(&hugetlb_lock);
1590
1591         return page;
1592 }
1593
1594 /*
1595  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1596  * NUMA_NO_NODE, which means that it may be allocated
1597  * anywhere.
1598  */
1599 static
1600 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1601 {
1602         unsigned long addr = -1;
1603
1604         return __alloc_buddy_huge_page(h, NULL, addr, nid);
1605 }
1606
1607 /*
1608  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1609  */
1610 static
1611 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1612                 struct vm_area_struct *vma, unsigned long addr)
1613 {
1614         return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1615 }
1616
1617 /*
1618  * This allocation function is useful in the context where vma is irrelevant.
1619  * E.g. soft-offlining uses this function because it only cares about the
1620  * physical address of the error page.
1621  */
1622 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1623 {
1624         struct page *page = NULL;
1625
1626         spin_lock(&hugetlb_lock);
1627         if (h->free_huge_pages - h->resv_huge_pages > 0)
1628                 page = dequeue_huge_page_node(h, nid);
1629         spin_unlock(&hugetlb_lock);
1630
1631         if (!page)
1632                 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1633
1634         return page;
1635 }
1636
1637 /*
1638  * Increase the hugetlb pool such that it can accommodate a reservation
1639  * of size 'delta'.
1640  */
1641 static int gather_surplus_pages(struct hstate *h, int delta)
1642 {
1643         struct list_head surplus_list;
1644         struct page *page, *tmp;
1645         int ret, i;
1646         int needed, allocated;
1647         bool alloc_ok = true;
1648
1649         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1650         if (needed <= 0) {
1651                 h->resv_huge_pages += delta;
1652                 return 0;
1653         }
1654
1655         allocated = 0;
1656         INIT_LIST_HEAD(&surplus_list);
1657
1658         ret = -ENOMEM;
1659 retry:
1660         spin_unlock(&hugetlb_lock);
1661         for (i = 0; i < needed; i++) {
1662                 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1663                 if (!page) {
1664                         alloc_ok = false;
1665                         break;
1666                 }
1667                 list_add(&page->lru, &surplus_list);
1668         }
1669         allocated += i;
1670
1671         /*
1672          * After retaking hugetlb_lock, we need to recalculate 'needed'
1673          * because either resv_huge_pages or free_huge_pages may have changed.
1674          */
1675         spin_lock(&hugetlb_lock);
1676         needed = (h->resv_huge_pages + delta) -
1677                         (h->free_huge_pages + allocated);
1678         if (needed > 0) {
1679                 if (alloc_ok)
1680                         goto retry;
1681                 /*
1682                  * We were not able to allocate enough pages to
1683                  * satisfy the entire reservation so we free what
1684                  * we've allocated so far.
1685                  */
1686                 goto free;
1687         }
1688         /*
1689          * The surplus_list now contains _at_least_ the number of extra pages
1690          * needed to accommodate the reservation.  Add the appropriate number
1691          * of pages to the hugetlb pool and free the extras back to the buddy
1692          * allocator.  Commit the entire reservation here to prevent another
1693          * process from stealing the pages as they are added to the pool but
1694          * before they are reserved.
1695          */
1696         needed += allocated;
1697         h->resv_huge_pages += delta;
1698         ret = 0;
1699
1700         /* Free the needed pages to the hugetlb pool */
1701         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1702                 if ((--needed) < 0)
1703                         break;
1704                 /*
1705                  * This page is now managed by the hugetlb allocator and has
1706                  * no users -- drop the buddy allocator's reference.
1707                  */
1708                 put_page_testzero(page);
1709                 VM_BUG_ON_PAGE(page_count(page), page);
1710                 enqueue_huge_page(h, page);
1711         }
1712 free:
1713         spin_unlock(&hugetlb_lock);
1714
1715         /* Free unnecessary surplus pages to the buddy allocator */
1716         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1717                 put_page(page);
1718         spin_lock(&hugetlb_lock);
1719
1720         return ret;
1721 }
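/*
 * Worked example of the accounting above (illustrative numbers): with
 * resv_huge_pages = 10, free_huge_pages = 8 and delta = 5, the first pass
 * computes needed = (10 + 5) - 8 = 7 and allocates 7 surplus pages.  If two
 * hugepages were freed into the pool while the lock was dropped, the
 * recalculation sees needed = (10 + 5) - (10 + 7) = -2, so 5 of the new
 * pages are enqueued onto the free lists and the 2 extras are handed back
 * to the buddy allocator in the 'free:' path.
 */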
1722
1723 /*
1724  * When releasing a hugetlb pool reservation, any surplus pages that were
1725  * allocated to satisfy the reservation must be explicitly freed if they were
1726  * never used.
1727  * Called with hugetlb_lock held.
1728  */
1729 static void return_unused_surplus_pages(struct hstate *h,
1730                                         unsigned long unused_resv_pages)
1731 {
1732         unsigned long nr_pages;
1733
1734         /* Uncommit the reservation */
1735         h->resv_huge_pages -= unused_resv_pages;
1736
1737         /* Cannot return gigantic pages currently */
1738         if (hstate_is_gigantic(h))
1739                 return;
1740
1741         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1742
1743         /*
1744          * We want to release as many surplus pages as possible, spread
1745          * evenly across all nodes with memory. Iterate across these nodes
1746          * until we can no longer free unreserved surplus pages. This occurs
1747          * when the nodes with surplus pages have no free pages.
1748  * free_pool_huge_page() will balance the freed pages across the
1749          * on-line nodes with memory and will handle the hstate accounting.
1750          */
1751         while (nr_pages--) {
1752                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1753                         break;
1754                 cond_resched_lock(&hugetlb_lock);
1755         }
1756 }
1757
1758
1759 /*
1760  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1761  * are used by the huge page allocation routines to manage reservations.
1762  *
1763  * vma_needs_reservation is called to determine if the huge page at addr
1764  * within the vma has an associated reservation.  If a reservation is
1765  * needed, the value 1 is returned.  The caller is then responsible for
1766  * managing the global reservation and subpool usage counts.  After
1767  * the huge page has been allocated, vma_commit_reservation is called
1768  * to add the page to the reservation map.  If the page allocation fails,
1769  * the reservation must be ended instead of committed.  vma_end_reservation
1770  * is called in such cases.
1771  *
1772  * In the normal case, vma_commit_reservation returns the same value
1773  * as the preceding vma_needs_reservation call.  The only time this
1774  * is not the case is if a reserve map was changed between calls.  It
1775  * is the responsibility of the caller to notice the difference and
1776  * take appropriate action.
1777  */
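/*
 * A minimal sketch of that protocol, modelled on alloc_huge_page() below
 * (error handling and locking trimmed, names as used there):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...dequeue or allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	commit = vma_commit_reservation(h, vma, addr);
 *	if (chg > commit)
 *		...undo the extra subpool/reservation count for the race...;
 */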
1778 enum vma_resv_mode {
1779         VMA_NEEDS_RESV,
1780         VMA_COMMIT_RESV,
1781         VMA_END_RESV,
1782 };
1783 static long __vma_reservation_common(struct hstate *h,
1784                                 struct vm_area_struct *vma, unsigned long addr,
1785                                 enum vma_resv_mode mode)
1786 {
1787         struct resv_map *resv;
1788         pgoff_t idx;
1789         long ret;
1790
1791         resv = vma_resv_map(vma);
1792         if (!resv)
1793                 return 1;
1794
1795         idx = vma_hugecache_offset(h, vma, addr);
1796         switch (mode) {
1797         case VMA_NEEDS_RESV:
1798                 ret = region_chg(resv, idx, idx + 1);
1799                 break;
1800         case VMA_COMMIT_RESV:
1801                 ret = region_add(resv, idx, idx + 1);
1802                 break;
1803         case VMA_END_RESV:
1804                 region_abort(resv, idx, idx + 1);
1805                 ret = 0;
1806                 break;
1807         default:
1808                 BUG();
1809         }
1810
1811         if (vma->vm_flags & VM_MAYSHARE)
1812                 return ret;
1813         else
1814                 return ret < 0 ? ret : 0;
1815 }
1816
1817 static long vma_needs_reservation(struct hstate *h,
1818                         struct vm_area_struct *vma, unsigned long addr)
1819 {
1820         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1821 }
1822
1823 static long vma_commit_reservation(struct hstate *h,
1824                         struct vm_area_struct *vma, unsigned long addr)
1825 {
1826         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1827 }
1828
1829 static void vma_end_reservation(struct hstate *h,
1830                         struct vm_area_struct *vma, unsigned long addr)
1831 {
1832         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1833 }
1834
1835 struct page *alloc_huge_page(struct vm_area_struct *vma,
1836                                     unsigned long addr, int avoid_reserve)
1837 {
1838         struct hugepage_subpool *spool = subpool_vma(vma);
1839         struct hstate *h = hstate_vma(vma);
1840         struct page *page;
1841         long map_chg, map_commit;
1842         long gbl_chg;
1843         int ret, idx;
1844         struct hugetlb_cgroup *h_cg;
1845
1846         idx = hstate_index(h);
1847         /*
1848          * Examine the region/reserve map to determine if the process
1849          * has a reservation for the page to be allocated.  A return
1850          * code of zero indicates a reservation exists (no change).
1851          */
1852         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1853         if (map_chg < 0)
1854                 return ERR_PTR(-ENOMEM);
1855
1856         /*
1857          * Processes that did not create the mapping will have no
1858          * reserves as indicated by the region/reserve map. Check
1859          * that the allocation will not exceed the subpool limit.
1860          * Allocations for MAP_NORESERVE mappings also need to be
1861          * checked against any subpool limit.
1862          */
1863         if (map_chg || avoid_reserve) {
1864                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1865                 if (gbl_chg < 0) {
1866                         vma_end_reservation(h, vma, addr);
1867                         return ERR_PTR(-ENOSPC);
1868                 }
1869
1870                 /*
1871                  * Even though there was no reservation in the region/reserve
1872                  * map, there could be reservations associated with the
1873                  * subpool that can be used.  This would be indicated if the
1874                  * return value of hugepage_subpool_get_pages() is zero.
1875                  * However, if avoid_reserve is specified we still avoid even
1876                  * the subpool reservations.
1877                  */
1878                 if (avoid_reserve)
1879                         gbl_chg = 1;
1880         }
1881
1882         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1883         if (ret)
1884                 goto out_subpool_put;
1885
1886         spin_lock(&hugetlb_lock);
1887         /*
1888          * gbl_chg is passed to indicate whether or not a page must be taken
1889          * from the global free pool (global change).  gbl_chg == 0 indicates
1890          * a reservation exists for the allocation.
1891          */
1892         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1893         if (!page) {
1894                 spin_unlock(&hugetlb_lock);
1895                 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1896                 if (!page)
1897                         goto out_uncharge_cgroup;
1898                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1899                         SetPagePrivate(page);
1900                         h->resv_huge_pages--;
1901                 }
1902                 spin_lock(&hugetlb_lock);
1903                 list_move(&page->lru, &h->hugepage_activelist);
1904                 /* Fall through */
1905         }
1906         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1907         spin_unlock(&hugetlb_lock);
1908
1909         set_page_private(page, (unsigned long)spool);
1910
1911         map_commit = vma_commit_reservation(h, vma, addr);
1912         if (unlikely(map_chg > map_commit)) {
1913                 /*
1914                  * The page was added to the reservation map between
1915                  * vma_needs_reservation and vma_commit_reservation.
1916                  * This indicates a race with hugetlb_reserve_pages.
1917                  * Adjust for the subpool count incremented above AND
1918                  * in hugetlb_reserve_pages for the same page.  Also,
1919                  * the reservation count added in hugetlb_reserve_pages
1920                  * no longer applies.
1921                  */
1922                 long rsv_adjust;
1923
1924                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1925                 hugetlb_acct_memory(h, -rsv_adjust);
1926         }
1927         return page;
1928
1929 out_uncharge_cgroup:
1930         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1931 out_subpool_put:
1932         if (map_chg || avoid_reserve)
1933                 hugepage_subpool_put_pages(spool, 1);
1934         vma_end_reservation(h, vma, addr);
1935         return ERR_PTR(-ENOSPC);
1936 }
1937
1938 /*
1939  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1940  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1941  * where no error pointer (ERR_PTR) is expected to be returned.
1942  */
1943 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1944                                 unsigned long addr, int avoid_reserve)
1945 {
1946         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1947         if (IS_ERR(page))
1948                 page = NULL;
1949         return page;
1950 }
1951
1952 int __weak alloc_bootmem_huge_page(struct hstate *h)
1953 {
1954         struct huge_bootmem_page *m;
1955         int nr_nodes, node;
1956
1957         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1958                 void *addr;
1959
1960                 addr = memblock_virt_alloc_try_nid_nopanic(
1961                                 huge_page_size(h), huge_page_size(h),
1962                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1963                 if (addr) {
1964                         /*
1965                          * Use the beginning of the huge page to store the
1966                          * huge_bootmem_page struct (until gather_bootmem
1967                          * puts them into the mem_map).
1968                          */
1969                         m = addr;
1970                         goto found;
1971                 }
1972         }
1973         return 0;
1974
1975 found:
1976         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1977         /* Put them into a private list first because mem_map is not up yet */
1978         list_add(&m->list, &huge_boot_pages);
1979         m->hstate = h;
1980         return 1;
1981 }
1982
1983 static void __init prep_compound_huge_page(struct page *page,
1984                 unsigned int order)
1985 {
1986         if (unlikely(order > (MAX_ORDER - 1)))
1987                 prep_compound_gigantic_page(page, order);
1988         else
1989                 prep_compound_page(page, order);
1990 }
1991
1992 /* Put bootmem huge pages into the standard lists after mem_map is up */
1993 static void __init gather_bootmem_prealloc(void)
1994 {
1995         struct huge_bootmem_page *m;
1996
1997         list_for_each_entry(m, &huge_boot_pages, list) {
1998                 struct hstate *h = m->hstate;
1999                 struct page *page;
2000
2001 #ifdef CONFIG_HIGHMEM
2002                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2003                 memblock_free_late(__pa(m),
2004                                    sizeof(struct huge_bootmem_page));
2005 #else
2006                 page = virt_to_page(m);
2007 #endif
2008                 WARN_ON(page_count(page) != 1);
2009                 prep_compound_huge_page(page, h->order);
2010                 WARN_ON(PageReserved(page));
2011                 prep_new_huge_page(h, page, page_to_nid(page));
2012                 /*
2013                  * If we had gigantic hugepages allocated at boot time, we need
2014                  * to restore the 'stolen' pages to totalram_pages in order to
2015                  * fix confusing memory reports from free(1) and another
2016                  * fix confusing memory reports from free(1) and other
2017                  * side-effects, like CommitLimit going negative.
2018                 if (hstate_is_gigantic(h))
2019                         adjust_managed_page_count(page, 1 << h->order);
2020         }
2021 }
2022
2023 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2024 {
2025         unsigned long i;
2026
2027         for (i = 0; i < h->max_huge_pages; ++i) {
2028                 if (hstate_is_gigantic(h)) {
2029                         if (!alloc_bootmem_huge_page(h))
2030                                 break;
2031                 } else if (!alloc_fresh_huge_page(h,
2032                                          &node_states[N_MEMORY]))
2033                         break;
2034         }
2035         h->max_huge_pages = i;
2036 }
2037
2038 static void __init hugetlb_init_hstates(void)
2039 {
2040         struct hstate *h;
2041
2042         for_each_hstate(h) {
2043                 if (minimum_order > huge_page_order(h))
2044                         minimum_order = huge_page_order(h);
2045
2046                 /* oversize hugepages were init'ed in early boot */
2047                 if (!hstate_is_gigantic(h))
2048                         hugetlb_hstate_alloc_pages(h);
2049         }
2050         VM_BUG_ON(minimum_order == UINT_MAX);
2051 }
2052
2053 static char * __init memfmt(char *buf, unsigned long n)
2054 {
2055         if (n >= (1UL << 30))
2056                 sprintf(buf, "%lu GB", n >> 30);
2057         else if (n >= (1UL << 20))
2058                 sprintf(buf, "%lu MB", n >> 20);
2059         else
2060                 sprintf(buf, "%lu KB", n >> 10);
2061         return buf;
2062 }
2063
2064 static void __init report_hugepages(void)
2065 {
2066         struct hstate *h;
2067
2068         for_each_hstate(h) {
2069                 char buf[32];
2070                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2071                         memfmt(buf, huge_page_size(h)),
2072                         h->free_huge_pages);
2073         }
2074 }
2075
2076 #ifdef CONFIG_HIGHMEM
2077 static void try_to_free_low(struct hstate *h, unsigned long count,
2078                                                 nodemask_t *nodes_allowed)
2079 {
2080         int i;
2081
2082         if (hstate_is_gigantic(h))
2083                 return;
2084
2085         for_each_node_mask(i, *nodes_allowed) {
2086                 struct page *page, *next;
2087                 struct list_head *freel = &h->hugepage_freelists[i];
2088                 list_for_each_entry_safe(page, next, freel, lru) {
2089                         if (count >= h->nr_huge_pages)
2090                                 return;
2091                         if (PageHighMem(page))
2092                                 continue;
2093                         list_del(&page->lru);
2094                         update_and_free_page(h, page);
2095                         h->free_huge_pages--;
2096                         h->free_huge_pages_node[page_to_nid(page)]--;
2097                 }
2098         }
2099 }
2100 #else
2101 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2102                                                 nodemask_t *nodes_allowed)
2103 {
2104 }
2105 #endif
2106
2107 /*
2108  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2109  * balanced by operating on them in a round-robin fashion.
2110  * Returns 1 if an adjustment was made.
2111  */
2112 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2113                                 int delta)
2114 {
2115         int nr_nodes, node;
2116
2117         VM_BUG_ON(delta != -1 && delta != 1);
2118
2119         if (delta < 0) {
2120                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2121                         if (h->surplus_huge_pages_node[node])
2122                                 goto found;
2123                 }
2124         } else {
2125                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2126                         if (h->surplus_huge_pages_node[node] <
2127                                         h->nr_huge_pages_node[node])
2128                                 goto found;
2129                 }
2130         }
2131         return 0;
2132
2133 found:
2134         h->surplus_huge_pages += delta;
2135         h->surplus_huge_pages_node[node] += delta;
2136         return 1;
2137 }
2138
2139 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2140 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2141                                                 nodemask_t *nodes_allowed)
2142 {
2143         unsigned long min_count, ret;
2144
2145         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2146                 return h->max_huge_pages;
2147
2148         /*
2149          * Increase the pool size
2150          * First take pages out of surplus state.  Then make up the
2151          * remaining difference by allocating fresh huge pages.
2152          *
2153          * We might race with __alloc_buddy_huge_page() here and be unable
2154          * to convert a surplus huge page to a normal huge page. That is
2155          * not critical, though, it just means the overall size of the
2156          * pool might be one hugepage larger than it needs to be, but
2157          * within all the constraints specified by the sysctls.
2158          */
2159         spin_lock(&hugetlb_lock);
2160         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2161                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2162                         break;
2163         }
2164
2165         while (count > persistent_huge_pages(h)) {
2166                 /*
2167                  * If this allocation races such that we no longer need the
2168                  * page, free_huge_page will handle it by freeing the page
2169                  * and reducing the surplus.
2170                  */
2171                 spin_unlock(&hugetlb_lock);
2172                 if (hstate_is_gigantic(h))
2173                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2174                 else
2175                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2176                 spin_lock(&hugetlb_lock);
2177                 if (!ret)
2178                         goto out;
2179
2180                 /* Bail for signals. Probably ctrl-c from user */
2181                 if (signal_pending(current))
2182                         goto out;
2183         }
2184
2185         /*
2186          * Decrease the pool size
2187          * First return free pages to the buddy allocator (being careful
2188          * to keep enough around to satisfy reservations).  Then place
2189          * pages into surplus state as needed so the pool will shrink
2190          * to the desired size as pages become free.
2191          *
2192          * By placing pages into the surplus state independent of the
2193          * overcommit value, we are allowing the surplus pool size to
2194          * exceed overcommit. There are few sane options here. Since
2195          * __alloc_buddy_huge_page() is checking the global counter,
2196          * though, we'll note that we're not allowed to exceed surplus
2197          * and won't grow the pool anywhere else. Not until one of the
2198          * sysctls are changed, or the surplus pages go out of use.
2199          */
2200         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2201         min_count = max(count, min_count);
2202         try_to_free_low(h, min_count, nodes_allowed);
2203         while (min_count < persistent_huge_pages(h)) {
2204                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2205                         break;
2206                 cond_resched_lock(&hugetlb_lock);
2207         }
2208         while (count < persistent_huge_pages(h)) {
2209                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2210                         break;
2211         }
2212 out:
2213         ret = persistent_huge_pages(h);
2214         spin_unlock(&hugetlb_lock);
2215         return ret;
2216 }
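/*
 * Shrink-path example with illustrative numbers: if nr_huge_pages = 100,
 * free_huge_pages = 30 and resv_huge_pages = 20, then min_count starts as
 * 20 + 100 - 30 = 90 (70 in-use pages plus 20 reserved free pages must be
 * kept).  A request for count = 50 is clamped to 90, so only 10 free pages
 * can be returned to the buddy allocator now; the remaining 40 pages are
 * marked surplus and will be freed by free_huge_page() once their users
 * release them.
 */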
2217
2218 #define HSTATE_ATTR_RO(_name) \
2219         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2220
2221 #define HSTATE_ATTR(_name) \
2222         static struct kobj_attribute _name##_attr = \
2223                 __ATTR(_name, 0644, _name##_show, _name##_store)
2224
2225 static struct kobject *hugepages_kobj;
2226 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2227
2228 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2229
2230 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2231 {
2232         int i;
2233
2234         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2235                 if (hstate_kobjs[i] == kobj) {
2236                         if (nidp)
2237                                 *nidp = NUMA_NO_NODE;
2238                         return &hstates[i];
2239                 }
2240
2241         return kobj_to_node_hstate(kobj, nidp);
2242 }
2243
2244 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2245                                         struct kobj_attribute *attr, char *buf)
2246 {
2247         struct hstate *h;
2248         unsigned long nr_huge_pages;
2249         int nid;
2250
2251         h = kobj_to_hstate(kobj, &nid);
2252         if (nid == NUMA_NO_NODE)
2253                 nr_huge_pages = h->nr_huge_pages;
2254         else
2255                 nr_huge_pages = h->nr_huge_pages_node[nid];
2256
2257         return sprintf(buf, "%lu\n", nr_huge_pages);
2258 }
2259
2260 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2261                                            struct hstate *h, int nid,
2262                                            unsigned long count, size_t len)
2263 {
2264         int err;
2265         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2266
2267         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2268                 err = -EINVAL;
2269                 goto out;
2270         }
2271
2272         if (nid == NUMA_NO_NODE) {
2273                 /*
2274                  * global hstate attribute
2275                  */
2276                 if (!(obey_mempolicy &&
2277                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2278                         NODEMASK_FREE(nodes_allowed);
2279                         nodes_allowed = &node_states[N_MEMORY];
2280                 }
2281         } else if (nodes_allowed) {
2282                 /*
2283                  * per node hstate attribute: adjust count to global,
2284                  * but restrict alloc/free to the specified node.
2285                  */
2286                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2287                 init_nodemask_of_node(nodes_allowed, nid);
2288         } else
2289                 nodes_allowed = &node_states[N_MEMORY];
2290
2291         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2292
2293         if (nodes_allowed != &node_states[N_MEMORY])
2294                 NODEMASK_FREE(nodes_allowed);
2295
2296         return len;
2297 out:
2298         NODEMASK_FREE(nodes_allowed);
2299         return err;
2300 }
2301
2302 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2303                                          struct kobject *kobj, const char *buf,
2304                                          size_t len)
2305 {
2306         struct hstate *h;
2307         unsigned long count;
2308         int nid;
2309         int err;
2310
2311         err = kstrtoul(buf, 10, &count);
2312         if (err)
2313                 return err;
2314
2315         h = kobj_to_hstate(kobj, &nid);
2316         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2317 }
2318
2319 static ssize_t nr_hugepages_show(struct kobject *kobj,
2320                                        struct kobj_attribute *attr, char *buf)
2321 {
2322         return nr_hugepages_show_common(kobj, attr, buf);
2323 }
2324
2325 static ssize_t nr_hugepages_store(struct kobject *kobj,
2326                struct kobj_attribute *attr, const char *buf, size_t len)
2327 {
2328         return nr_hugepages_store_common(false, kobj, buf, len);
2329 }
2330 HSTATE_ATTR(nr_hugepages);
2331
2332 #ifdef CONFIG_NUMA
2333
2334 /*
2335  * hstate attribute for optionally mempolicy-based constraint on persistent
2336  * huge page alloc/free.
2337  */
2338 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2339                                        struct kobj_attribute *attr, char *buf)
2340 {
2341         return nr_hugepages_show_common(kobj, attr, buf);
2342 }
2343
2344 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2345                struct kobj_attribute *attr, const char *buf, size_t len)
2346 {
2347         return nr_hugepages_store_common(true, kobj, buf, len);
2348 }
2349 HSTATE_ATTR(nr_hugepages_mempolicy);
2350 #endif
2351
2352
2353 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2354                                         struct kobj_attribute *attr, char *buf)
2355 {
2356         struct hstate *h = kobj_to_hstate(kobj, NULL);
2357         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2358 }
2359
2360 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2361                 struct kobj_attribute *attr, const char *buf, size_t count)
2362 {
2363         int err;
2364         unsigned long input;
2365         struct hstate *h = kobj_to_hstate(kobj, NULL);
2366
2367         if (hstate_is_gigantic(h))
2368                 return -EINVAL;
2369
2370         err = kstrtoul(buf, 10, &input);
2371         if (err)
2372                 return err;
2373
2374         spin_lock(&hugetlb_lock);
2375         h->nr_overcommit_huge_pages = input;
2376         spin_unlock(&hugetlb_lock);
2377
2378         return count;
2379 }
2380 HSTATE_ATTR(nr_overcommit_hugepages);
2381
2382 static ssize_t free_hugepages_show(struct kobject *kobj,
2383                                         struct kobj_attribute *attr, char *buf)
2384 {
2385         struct hstate *h;
2386         unsigned long free_huge_pages;
2387         int nid;
2388
2389         h = kobj_to_hstate(kobj, &nid);
2390         if (nid == NUMA_NO_NODE)
2391                 free_huge_pages = h->free_huge_pages;
2392         else
2393                 free_huge_pages = h->free_huge_pages_node[nid];
2394
2395         return sprintf(buf, "%lu\n", free_huge_pages);
2396 }
2397 HSTATE_ATTR_RO(free_hugepages);
2398
2399 static ssize_t resv_hugepages_show(struct kobject *kobj,
2400                                         struct kobj_attribute *attr, char *buf)
2401 {
2402         struct hstate *h = kobj_to_hstate(kobj, NULL);
2403         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2404 }
2405 HSTATE_ATTR_RO(resv_hugepages);
2406
2407 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2408                                         struct kobj_attribute *attr, char *buf)
2409 {
2410         struct hstate *h;
2411         unsigned long surplus_huge_pages;
2412         int nid;
2413
2414         h = kobj_to_hstate(kobj, &nid);
2415         if (nid == NUMA_NO_NODE)
2416                 surplus_huge_pages = h->surplus_huge_pages;
2417         else
2418                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2419
2420         return sprintf(buf, "%lu\n", surplus_huge_pages);
2421 }
2422 HSTATE_ATTR_RO(surplus_hugepages);
2423
2424 static struct attribute *hstate_attrs[] = {
2425         &nr_hugepages_attr.attr,
2426         &nr_overcommit_hugepages_attr.attr,
2427         &free_hugepages_attr.attr,
2428         &resv_hugepages_attr.attr,
2429         &surplus_hugepages_attr.attr,
2430 #ifdef CONFIG_NUMA
2431         &nr_hugepages_mempolicy_attr.attr,
2432 #endif
2433         NULL,
2434 };
2435
2436 static struct attribute_group hstate_attr_group = {
2437         .attrs = hstate_attrs,
2438 };
2439
2440 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2441                                     struct kobject **hstate_kobjs,
2442                                     struct attribute_group *hstate_attr_group)
2443 {
2444         int retval;
2445         int hi = hstate_index(h);
2446
2447         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2448         if (!hstate_kobjs[hi])
2449                 return -ENOMEM;
2450
2451         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2452         if (retval)
2453                 kobject_put(hstate_kobjs[hi]);
2454
2455         return retval;
2456 }
2457
2458 static void __init hugetlb_sysfs_init(void)
2459 {
2460         struct hstate *h;
2461         int err;
2462
2463         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2464         if (!hugepages_kobj)
2465                 return;
2466
2467         for_each_hstate(h) {
2468                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2469                                          hstate_kobjs, &hstate_attr_group);
2470                 if (err)
2471                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
2472         }
2473 }
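/*
 * The kobjects created above surface the hstate attributes under sysfs,
 * e.g. for a 2 MB hstate (paths shown for illustration):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *
 * Writing a decimal count to nr_hugepages ends up in
 * __nr_hugepages_store_common() and resizes the persistent pool.
 */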
2474
2475 #ifdef CONFIG_NUMA
2476
2477 /*
2478  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2479  * with node devices in node_devices[] using a parallel array.  The array
2480  * index of a node device or _hstate == node id.
2481  * index of a node device or _hstate is the corresponding node id.
2482  * the base kernel, on the hugetlb module.
2483  */
2484 struct node_hstate {
2485         struct kobject          *hugepages_kobj;
2486         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2487 };
2488 static struct node_hstate node_hstates[MAX_NUMNODES];
2489
2490 /*
2491  * A subset of global hstate attributes for node devices
2492  */
2493 static struct attribute *per_node_hstate_attrs[] = {
2494         &nr_hugepages_attr.attr,
2495         &free_hugepages_attr.attr,
2496         &surplus_hugepages_attr.attr,
2497         NULL,
2498 };
2499
2500 static struct attribute_group per_node_hstate_attr_group = {
2501         .attrs = per_node_hstate_attrs,
2502 };
2503
2504 /*
2505  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2506  * Returns node id via non-NULL nidp.
2507  */
2508 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2509 {
2510         int nid;
2511
2512         for (nid = 0; nid < nr_node_ids; nid++) {
2513                 struct node_hstate *nhs = &node_hstates[nid];
2514                 int i;
2515                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2516                         if (nhs->hstate_kobjs[i] == kobj) {
2517                                 if (nidp)
2518                                         *nidp = nid;
2519                                 return &hstates[i];
2520                         }
2521         }
2522
2523         BUG();
2524         return NULL;
2525 }
2526
2527 /*
2528  * Unregister hstate attributes from a single node device.
2529  * No-op if no hstate attributes attached.
2530  */
2531 static void hugetlb_unregister_node(struct node *node)
2532 {
2533         struct hstate *h;
2534         struct node_hstate *nhs = &node_hstates[node->dev.id];
2535
2536         if (!nhs->hugepages_kobj)
2537                 return;         /* no hstate attributes */
2538
2539         for_each_hstate(h) {
2540                 int idx = hstate_index(h);
2541                 if (nhs->hstate_kobjs[idx]) {
2542                         kobject_put(nhs->hstate_kobjs[idx]);
2543                         nhs->hstate_kobjs[idx] = NULL;
2544                 }
2545         }
2546
2547         kobject_put(nhs->hugepages_kobj);
2548         nhs->hugepages_kobj = NULL;
2549 }
2550
2551
2552 /*
2553  * Register hstate attributes for a single node device.
2554  * No-op if attributes already registered.
2555  */
2556 static void hugetlb_register_node(struct node *node)
2557 {
2558         struct hstate *h;
2559         struct node_hstate *nhs = &node_hstates[node->dev.id];
2560         int err;
2561
2562         if (nhs->hugepages_kobj)
2563                 return;         /* already allocated */
2564
2565         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2566                                                         &node->dev.kobj);
2567         if (!nhs->hugepages_kobj)
2568                 return;
2569
2570         for_each_hstate(h) {
2571                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2572                                                 nhs->hstate_kobjs,
2573                                                 &per_node_hstate_attr_group);
2574                 if (err) {
2575                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2576                                 h->name, node->dev.id);
2577                         hugetlb_unregister_node(node);
2578                         break;
2579                 }
2580         }
2581 }
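/*
 * With CONFIG_NUMA, the same pattern adds a per-node view, for example
 * (illustrative path for node 0 and a 2 MB hstate):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * exposing the reduced per_node_hstate_attr_group defined above.
 */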
2582
2583 /*
2584  * hugetlb init time:  register hstate attributes for all registered node
2585  * devices of nodes that have memory.  All on-line nodes should have
2586  * registered their associated device by this time.
2587  */
2588 static void __init hugetlb_register_all_nodes(void)
2589 {
2590         int nid;
2591
2592         for_each_node_state(nid, N_MEMORY) {
2593                 struct node *node = node_devices[nid];
2594                 if (node->dev.id == nid)
2595                         hugetlb_register_node(node);
2596         }
2597
2598         /*
2599          * Let the node device driver know we're here so it can
2600          * [un]register hstate attributes on node hotplug.
2601          */
2602         register_hugetlbfs_with_node(hugetlb_register_node,
2603                                      hugetlb_unregister_node);
2604 }
2605 #else   /* !CONFIG_NUMA */
2606
2607 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2608 {
2609         BUG();
2610         if (nidp)
2611                 *nidp = -1;
2612         return NULL;
2613 }
2614
2615 static void hugetlb_register_all_nodes(void) { }
2616
2617 #endif
2618
2619 static int __init hugetlb_init(void)
2620 {
2621         int i;
2622
2623         if (!hugepages_supported())
2624                 return 0;
2625
2626         if (!size_to_hstate(default_hstate_size)) {
2627                 default_hstate_size = HPAGE_SIZE;
2628                 if (!size_to_hstate(default_hstate_size))
2629                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2630         }
2631         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2632         if (default_hstate_max_huge_pages)
2633                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2634
2635         hugetlb_init_hstates();
2636         gather_bootmem_prealloc();
2637         report_hugepages();
2638
2639         hugetlb_sysfs_init();
2640         hugetlb_register_all_nodes();
2641         hugetlb_cgroup_file_init();
2642
2643 #ifdef CONFIG_SMP
2644         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2645 #else
2646         num_fault_mutexes = 1;
2647 #endif
2648         hugetlb_fault_mutex_table =
2649                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2650         BUG_ON(!hugetlb_fault_mutex_table);
2651
2652         for (i = 0; i < num_fault_mutexes; i++)
2653                 mutex_init(&hugetlb_fault_mutex_table[i]);
2654         return 0;
2655 }
2656 subsys_initcall(hugetlb_init);
2657
2658 /* Should be called on processing a hugepagesz=... option */
2659 void __init hugetlb_add_hstate(unsigned int order)
2660 {
2661         struct hstate *h;
2662         unsigned long i;
2663
2664         if (size_to_hstate(PAGE_SIZE << order)) {
2665                 pr_warning("hugepagesz= specified twice, ignoring\n");
2666                 return;
2667         }
2668         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2669         BUG_ON(order == 0);
2670         h = &hstates[hugetlb_max_hstate++];
2671         h->order = order;
2672         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2673         h->nr_huge_pages = 0;
2674         h->free_huge_pages = 0;
2675         for (i = 0; i < MAX_NUMNODES; ++i)
2676                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2677         INIT_LIST_HEAD(&h->hugepage_activelist);
2678         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2679         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2680         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2681                                         huge_page_size(h)/1024);
2682
2683         parsed_hstate = h;
2684 }
2685
2686 static int __init hugetlb_nrpages_setup(char *s)
2687 {
2688         unsigned long *mhp;
2689         static unsigned long *last_mhp;
2690
2691         /*
2692          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2693          * so this hugepages= parameter goes to the "default hstate".
2694          */
2695         if (!hugetlb_max_hstate)
2696                 mhp = &default_hstate_max_huge_pages;
2697         else
2698                 mhp = &parsed_hstate->max_huge_pages;
2699
2700         if (mhp == last_mhp) {
2701                 pr_warning("hugepages= specified twice without "
2702                            "interleaving hugepagesz=, ignoring\n");
2703                 return 1;
2704         }
2705
2706         if (sscanf(s, "%lu", mhp) <= 0)
2707                 *mhp = 0;
2708
2709         /*
2710          * Global state is always initialized later in hugetlb_init.
2711          * But we need to allocate >= MAX_ORDER hstates here early to still
2712          * use the bootmem allocator.
2713          */
2714         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2715                 hugetlb_hstate_alloc_pages(parsed_hstate);
2716
2717         last_mhp = mhp;
2718
2719         return 1;
2720 }
2721 __setup("hugepages=", hugetlb_nrpages_setup);
2722
2723 static int __init hugetlb_default_setup(char *s)
2724 {
2725         default_hstate_size = memparse(s, &s);
2726         return 1;
2727 }
2728 __setup("default_hugepagesz=", hugetlb_default_setup);
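/*
 * Together with the arch hugepagesz= handler (which calls
 * hugetlb_add_hstate() above), these hooks parse boot command lines such
 * as, for example:
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=64
 *
 * Each hugepages= count applies to the most recently parsed hugepagesz=
 * hstate (or to the default hstate if none was given), and gigantic
 * hstates get their pages from bootmem right here at parse time.
 */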
2729
2730 static unsigned int cpuset_mems_nr(unsigned int *array)
2731 {
2732         int node;
2733         unsigned int nr = 0;
2734
2735         for_each_node_mask(node, cpuset_current_mems_allowed)
2736                 nr += array[node];
2737
2738         return nr;
2739 }
2740
2741 #ifdef CONFIG_SYSCTL
2742 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2743                          struct ctl_table *table, int write,
2744                          void __user *buffer, size_t *length, loff_t *ppos)
2745 {
2746         struct hstate *h = &default_hstate;
2747         unsigned long tmp = h->max_huge_pages;
2748         int ret;
2749
2750         if (!hugepages_supported())
2751                 return -ENOTSUPP;
2752
2753         table->data = &tmp;
2754         table->maxlen = sizeof(unsigned long);
2755         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2756         if (ret)
2757                 goto out;
2758
2759         if (write)
2760                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2761                                                   NUMA_NO_NODE, tmp, *length);
2762 out:
2763         return ret;
2764 }
2765
2766 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2767                           void __user *buffer, size_t *length, loff_t *ppos)
2768 {
2769
2770         return hugetlb_sysctl_handler_common(false, table, write,
2771                                                         buffer, length, ppos);
2772 }
2773
2774 #ifdef CONFIG_NUMA
2775 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2776                           void __user *buffer, size_t *length, loff_t *ppos)
2777 {
2778         return hugetlb_sysctl_handler_common(true, table, write,
2779                                                         buffer, length, ppos);
2780 }
2781 #endif /* CONFIG_NUMA */
2782
2783 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2784                         void __user *buffer,
2785                         size_t *length, loff_t *ppos)
2786 {
2787         struct hstate *h = &default_hstate;
2788         unsigned long tmp;
2789         int ret;
2790
2791         if (!hugepages_supported())
2792                 return -ENOTSUPP;
2793
2794         tmp = h->nr_overcommit_huge_pages;
2795
2796         if (write && hstate_is_gigantic(h))
2797                 return -EINVAL;
2798
2799         table->data = &tmp;
2800         table->maxlen = sizeof(unsigned long);
2801         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2802         if (ret)
2803                 goto out;
2804
2805         if (write) {
2806                 spin_lock(&hugetlb_lock);
2807                 h->nr_overcommit_huge_pages = tmp;
2808                 spin_unlock(&hugetlb_lock);
2809         }
2810 out:
2811         return ret;
2812 }
2813
2814 #endif /* CONFIG_SYSCTL */
2815
2816 void hugetlb_report_meminfo(struct seq_file *m)
2817 {
2818         struct hstate *h = &default_hstate;
2819         if (!hugepages_supported())
2820                 return;
2821         seq_printf(m,
2822                         "HugePages_Total:   %5lu\n"
2823                         "HugePages_Free:    %5lu\n"
2824                         "HugePages_Rsvd:    %5lu\n"
2825                         "HugePages_Surp:    %5lu\n"
2826                         "Hugepagesize:   %8lu kB\n",
2827                         h->nr_huge_pages,
2828                         h->free_huge_pages,
2829                         h->resv_huge_pages,
2830                         h->surplus_huge_pages,
2831                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2832 }
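/*
 * Sample /proc/meminfo output produced by the above (values illustrative):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */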
2833
2834 int hugetlb_report_node_meminfo(int nid, char *buf)
2835 {
2836         struct hstate *h = &default_hstate;
2837         if (!hugepages_supported())
2838                 return 0;
2839         return sprintf(buf,
2840                 "Node %d HugePages_Total: %5u\n"
2841                 "Node %d HugePages_Free:  %5u\n"
2842                 "Node %d HugePages_Surp:  %5u\n",
2843                 nid, h->nr_huge_pages_node[nid],
2844                 nid, h->free_huge_pages_node[nid],
2845                 nid, h->surplus_huge_pages_node[nid]);
2846 }
2847
2848 void hugetlb_show_meminfo(void)
2849 {
2850         struct hstate *h;
2851         int nid;
2852
2853         if (!hugepages_supported())
2854                 return;
2855
2856         for_each_node_state(nid, N_MEMORY)
2857                 for_each_hstate(h)
2858                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2859                                 nid,
2860                                 h->nr_huge_pages_node[nid],
2861                                 h->free_huge_pages_node[nid],
2862                                 h->surplus_huge_pages_node[nid],
2863                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2864 }
2865
2866 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2867 {
2868         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2869                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2870 }
2871
2872 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2873 unsigned long hugetlb_total_pages(void)
2874 {
2875         struct hstate *h;
2876         unsigned long nr_total_pages = 0;
2877
2878         for_each_hstate(h)
2879                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2880         return nr_total_pages;
2881 }
2882
2883 static int hugetlb_acct_memory(struct hstate *h, long delta)
2884 {
2885         int ret = -ENOMEM;
2886
2887         spin_lock(&hugetlb_lock);
2888         /*
2889          * When cpuset is configured, it breaks the strict hugetlb page
2890          * reservation as the accounting is done on a global variable. Such
2891          * reservation is completely rubbish in the presence of cpuset because
2892          * the reservation is not checked against page availability for the
2893          * current cpuset. Application can still potentially OOM'ed by kernel
2894          * current cpuset. An application can still potentially be OOM'ed by
2895          * the kernel due to a lack of free htlb pages in the cpuset that the
2896          * task is in. Attempting to enforce strict accounting with cpuset is
2897          * almost impossible (or too ugly) because cpusets are so fluid that
2898          * tasks or memory nodes can be dynamically moved between cpusets.
2899          * The change of semantics for shared hugetlb mapping with cpuset is
2900          * undesirable. However, in order to preserve some of the semantics,
2901          * we fall back to checking against current free page availability as
2902          * a best attempt, hopefully minimizing the impact of the changed
2903          * semantics that cpuset introduces.
2904          */
2905         if (delta > 0) {
2906                 if (gather_surplus_pages(h, delta) < 0)
2907                         goto out;
2908
2909                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2910                         return_unused_surplus_pages(h, delta);
2911                         goto out;
2912                 }
2913         }
2914
2915         ret = 0;
2916         if (delta < 0)
2917                 return_unused_surplus_pages(h, (unsigned long) -delta);
2918
2919 out:
2920         spin_unlock(&hugetlb_lock);
2921         return ret;
2922 }
2923
2924 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2925 {
2926         struct resv_map *resv = vma_resv_map(vma);
2927
2928         /*
2929          * This new VMA should share its sibling's reservation map if present.
2930          * The VMA will only ever have a valid reservation map pointer where
2931          * it is being copied for another still existing VMA.  As that VMA
2932          * has a reference to the reservation map it cannot disappear until
2933          * after this open call completes.  It is therefore safe to take a
2934          * new reference here without additional locking.
2935          */
2936         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2937                 kref_get(&resv->refs);
2938 }
2939
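/*
 * Called when the VMA is being torn down.  For the reservation owner,
 * any reservations set up at mmap time but never consumed by a fault
 * are returned to the subpool (and, via hugetlb_acct_memory(), to the
 * global pool), and the reservation map reference is dropped.
 */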
2940 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2941 {
2942         struct hstate *h = hstate_vma(vma);
2943         struct resv_map *resv = vma_resv_map(vma);
2944         struct hugepage_subpool *spool = subpool_vma(vma);
2945         unsigned long reserve, start, end;
2946         long gbl_reserve;
2947
2948         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2949                 return;
2950
2951         start = vma_hugecache_offset(h, vma, vma->vm_start);
2952         end = vma_hugecache_offset(h, vma, vma->vm_end);
2953
2954         reserve = (end - start) - region_count(resv, start, end);
2955
2956         kref_put(&resv->refs, resv_map_release);
2957
2958         if (reserve) {
2959                 /*
2960                  * Decrement reserve counts.  The global reserve count may be
2961                  * adjusted if the subpool has a minimum size.
2962                  */
2963                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2964                 hugetlb_acct_memory(h, -gbl_reserve);
2965         }
2966 }
2967
2968 /*
2969  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2970  * handle_mm_fault() to try to instantiate regular-sized pages in the
2971  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2972  * this far.
2973  */
2974 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2975 {
2976         BUG();
2977         return 0;
2978 }
2979
2980 const struct vm_operations_struct hugetlb_vm_ops = {
2981         .fault = hugetlb_vm_op_fault,
2982         .open = hugetlb_vm_op_open,
2983         .close = hugetlb_vm_op_close,
2984 };
2985
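/*
 * Build a huge PTE for @page using the VMA's protection bits.  Writable
 * mappings get a dirty, writable entry; all others are write-protected.
 * The entry is marked young and huge, then handed to the architecture
 * for any final fixups.
 */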
2986 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2987                                 int writable)
2988 {
2989         pte_t entry;
2990
2991         if (writable) {
2992                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2993                                          vma->vm_page_prot)));
2994         } else {
2995                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2996                                            vma->vm_page_prot));
2997         }
2998         entry = pte_mkyoung(entry);
2999         entry = pte_mkhuge(entry);
3000         entry = arch_make_huge_pte(entry, vma, page, writable);
3001
3002         return entry;
3003 }
3004
3005 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3006                                    unsigned long address, pte_t *ptep)
3007 {
3008         pte_t entry;
3009
3010         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3011         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3012                 update_mmu_cache(vma, address, ptep);
3013 }
3014
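/*
 * A huge PTE that is neither none nor present encodes a swap-style
 * entry.  The two helpers below report whether such an entry marks a
 * page under migration or a hwpoisoned page, respectively.
 */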
3015 static int is_hugetlb_entry_migration(pte_t pte)
3016 {
3017         swp_entry_t swp;
3018
3019         if (huge_pte_none(pte) || pte_present(pte))
3020                 return 0;
3021         swp = pte_to_swp_entry(pte);
3022         if (non_swap_entry(swp) && is_migration_entry(swp))
3023                 return 1;
3024         else
3025                 return 0;
3026 }
3027
3028 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3029 {
3030         swp_entry_t swp;
3031
3032         if (huge_pte_none(pte) || pte_present(pte))
3033                 return 0;
3034         swp = pte_to_swp_entry(pte);
3035         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3036                 return 1;
3037         else
3038                 return 0;
3039 }
3040
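/*
 * Copy the huge page table entries of @vma from @src to @dst at fork
 * time.  For private mappings the parent's entries are write-protected
 * as well, so both sides fault and COW on their next write.  Returns 0
 * on success or -ENOMEM if a destination page table cannot be allocated.
 */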
3041 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3042                             struct vm_area_struct *vma)
3043 {
3044         pte_t *src_pte, *dst_pte, entry;
3045         struct page *ptepage;
3046         unsigned long addr;
3047         int cow;
3048         struct hstate *h = hstate_vma(vma);
3049         unsigned long sz = huge_page_size(h);
3050         unsigned long mmun_start;       /* For mmu_notifiers */
3051         unsigned long mmun_end;         /* For mmu_notifiers */
3052         int ret = 0;
3053
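        /*
         * Only private mappings that may be written need copy-on-write
         * semantics: VM_SHARED clear and VM_MAYWRITE set.
         */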
3054         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3055
3056         mmun_start = vma->vm_start;
3057         mmun_end = vma->vm_end;
3058         if (cow)
3059                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3060
3061         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3062                 spinlock_t *src_ptl, *dst_ptl;
3063                 src_pte = huge_pte_offset(src, addr);
3064                 if (!src_pte)
3065                         continue;
3066                 dst_pte = huge_pte_alloc(dst, addr, sz);
3067                 if (!dst_pte) {
3068                         ret = -ENOMEM;
3069                         break;
3070                 }
3071
3072                 /* If the pagetables are shared don't copy or take references */
3073                 if (dst_pte == src_pte)
3074                         continue;
3075
3076                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3077                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3078                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3079                 entry = huge_ptep_get(src_pte);
3080                 if (huge_pte_none(entry)) { /* skip none entry */
3081                         ;
3082                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3083                                     is_hugetlb_entry_hwpoisoned(entry))) {
3084                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3085
3086                         if (is_write_migration_entry(swp_entry) && cow) {
3087                                 /*
3088                                  * COW mappings require pages in both
3089                                  * parent and child to be set to read.
3090                                  */
3091                                 make_migration_entry_read(&swp_entry);
3092                                 entry = swp_entry_to_pte(swp_entry);
3093                                 set_huge_pte_at(src, addr, src_pte, entry);
3094                         }
3095                         set_huge_pte_at(dst, addr, dst_pte, entry);
3096                 } else {
3097                         if (cow) {
3098                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3099                                 mmu_notifier_invalidate_range(src, mmun_start,
3100                                                                    mmun_end);
3101                         }
3102                         entry = huge_ptep_get(src_pte);
3103                         ptepage = pte_page(entry);
3104                         get_page(ptepage);
3105                         page_dup_rmap(ptepage, true);
3106                         set_huge_pte_at(dst, addr, dst_pte, entry);
3107                         hugetlb_count_add(pages_per_huge_page(h), dst);
3108                 }
3109                 spin_unlock(src_ptl);
3110                 spin_unlock(dst_ptl);
3111         }
3112
3113         if (cow)
3114                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3115
3116         return ret;
3117 }
3118
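/*
 * Unmap and free the huge pages mapped by @vma in [start, end).  If
 * @ref_page is non-NULL only that specific page is unmapped, and the
 * VMA is flagged with HPAGE_RESV_UNMAPPED so later faults fail instead
 * of silently losing data.
 */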
3119 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3120                             unsigned long start, unsigned long end,
3121                             struct page *ref_page)
3122 {
3123         int force_flush = 0;
3124         struct mm_struct *mm = vma->vm_mm;
3125         unsigned long address;
3126         pte_t *ptep;
3127         pte_t pte;
3128         spinlock_t *ptl;
3129         struct page *page;
3130         struct hstate *h = hstate_vma(vma);
3131         unsigned long sz = huge_page_size(h);
3132         const unsigned long mmun_start = start; /* For mmu_notifiers */
3133         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3134
3135         WARN_ON(!is_vm_hugetlb_page(vma));
3136         BUG_ON(start & ~huge_page_mask(h));
3137         BUG_ON(end & ~huge_page_mask(h));
3138
3139         tlb_start_vma(tlb, vma);
3140         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3141         address = start;
3142 again:
3143         for (; address < end; address += sz) {
3144                 ptep = huge_pte_offset(mm, address);
3145                 if (!ptep)
3146                         continue;
3147
3148                 ptl = huge_pte_lock(h, mm, ptep);
3149                 if (huge_pmd_unshare(mm, &address, ptep))
3150                         goto unlock;
3151
3152                 pte = huge_ptep_get(ptep);
3153                 if (huge_pte_none(pte))
3154                         goto unlock;
3155
3156                 /*
3157                  * Migrating hugepage or HWPoisoned hugepage is already
3158                  * unmapped and its refcount is dropped, so just clear pte here.
3159                  */
3160                 if (unlikely(!pte_present(pte))) {
3161                         huge_pte_clear(mm, address, ptep);
3162                         goto unlock;
3163                 }
3164
3165                 page = pte_page(pte);
3166                 /*
3167                  * If a reference page is supplied, it is because a specific
3168                  * page is being unmapped, not a range. Ensure the page we
3169                  * are about to unmap is the actual page of interest.
3170                  */
3171                 if (ref_page) {
3172                         if (page != ref_page)
3173                                 goto unlock;
3174
3175                         /*
3176                          * Mark the VMA as having unmapped its page so that
3177                          * future faults in this VMA will fail rather than
3178                          * looking like data was lost
3179                          */
3180                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3181                 }
3182
3183                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3184                 tlb_remove_tlb_entry(tlb, ptep, address);
3185                 if (huge_pte_dirty(pte))
3186                         set_page_dirty(page);
3187
3188                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3189                 page_remove_rmap(page, true);
3190                 force_flush = !__tlb_remove_page(tlb, page);
3191                 if (force_flush) {
3192                         address += sz;
3193                         spin_unlock(ptl);
3194                         break;
3195                 }
3196                 /* Bail out after unmapping reference page if supplied */
3197                 if (ref_page) {
3198                         spin_unlock(ptl);
3199                         break;
3200                 }
3201 unlock:
3202                 spin_unlock(ptl);
3203         }
3204         /*
3205          * mmu_gather ran out of room to batch pages, so we break out of
3206          * the PTE lock to avoid doing the potentially expensive TLB invalidate
3207          * and page-free while holding it.
3208          */
3209         if (force_flush) {
3210                 force_flush = 0;
3211                 tlb_flush_mmu(tlb);
3212                 if (address < end && !ref_page)
3213                         goto again;
3214         }
3215         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3216         tlb_end_vma(tlb, vma);
3217 }
3218
3219 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3220                           struct vm_area_struct *vma, unsigned long start,
3221                           unsigned long end, struct page *ref_page)
3222 {
3223         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3224
3225         /*
3226          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3227          * test will fail on a vma being torn down, and not grab a page table
3228          * on its way out.  We're lucky that the flag has such an appropriate
3229          * name, and can in fact be safely cleared here. We could clear it
3230          * before the __unmap_hugepage_range above, but all that's necessary
3231          * is to clear it before releasing the i_mmap_rwsem. This works
3232          * because in the context this is called, the VMA is about to be
3233          * destroyed and the i_mmap_rwsem is held.
3234          */
3235         vma->vm_flags &= ~VM_MAYSHARE;
3236 }
3237
3238 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3239                           unsigned long end, struct page *ref_page)
3240 {
3241         struct mm_struct *mm;
3242         struct mmu_gather tlb;
3243
3244         mm = vma->vm_mm;
3245
3246         tlb_gather_mmu(&tlb, mm, start, end);
3247         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3248         tlb_finish_mmu(&tlb, start, end);
3249 }
3250
3251 /*
3252  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3253  * mapping it owns the reserve page for. The intention is to unmap the page
3254  * from other VMAs and let the children be SIGKILLed if they are faulting the
3255  * same region.
3256  */
3257 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3258                               struct page *page, unsigned long address)
3259 {
3260         struct hstate *h = hstate_vma(vma);
3261         struct vm_area_struct *iter_vma;
3262         struct address_space *mapping;
3263         pgoff_t pgoff;
3264
3265         /*
3266          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3267          * from page cache lookup which is in HPAGE_SIZE units.
3268          */
3269         address = address & huge_page_mask(h);
3270         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3271                         vma->vm_pgoff;
3272         mapping = file_inode(vma->vm_file)->i_mapping;
3273
3274         /*
3275          * Take the mapping lock for the duration of the table walk. As
3276          * this mapping should be shared between all the VMAs,
3277          * __unmap_hugepage_range() is called as the lock is already held
3278          */
3279         i_mmap_lock_write(mapping);
3280         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3281                 /* Do not unmap the current VMA */
3282                 if (iter_vma == vma)
3283                         continue;
3284
3285                 /*
3286                  * Shared VMAs have their own reserves and do not affect
3287                  * MAP_PRIVATE accounting but it is possible that a shared
3288                  * VMA is using the same page so check and skip such VMAs.
3289                  */
3290                 if (iter_vma->vm_flags & VM_MAYSHARE)
3291                         continue;
3292
3293                 /*
3294                  * Unmap the page from other VMAs without their own reserves.
3295                  * They get marked to be SIGKILLed if they fault in these
3296                  * areas. This is because a future no-page fault on this VMA
3297                  * could insert a zeroed page instead of the data existing
3298                  * from the time of fork. This would look like data corruption
3299                  */
3300                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3301                         unmap_hugepage_range(iter_vma, address,
3302                                              address + huge_page_size(h), page);
3303         }
3304         i_mmap_unlock_write(mapping);
3305 }
3306
3307 /*
3308  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3309  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3310  * cannot race with other handlers or page migration.
3311  * Keep the pte_same checks anyway to make transition from the mutex easier.
3312  */
3313 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3314                         unsigned long address, pte_t *ptep, pte_t pte,
3315                         struct page *pagecache_page, spinlock_t *ptl)
3316 {
3317         struct hstate *h = hstate_vma(vma);
3318         struct page *old_page, *new_page;
3319         int ret = 0, outside_reserve = 0;
3320         unsigned long mmun_start;       /* For mmu_notifiers */
3321         unsigned long mmun_end;         /* For mmu_notifiers */
3322
3323         old_page = pte_page(pte);
3324
3325 retry_avoidcopy:
3326         /* If no-one else is actually using this page, avoid the copy
3327          * and just make the page writable */
3328         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3329                 page_move_anon_rmap(old_page, vma, address);
3330                 set_huge_ptep_writable(vma, address, ptep);
3331                 return 0;
3332         }
3333
3334         /*
3335          * If the process that created a MAP_PRIVATE mapping is about to
3336          * perform a COW due to a shared page count, attempt to satisfy
3337          * the allocation without using the existing reserves. The pagecache
3338          * page is used to determine if the reserve at this address was
3339          * consumed or not. If reserves were used, a partial faulted mapping
3340          * consumed or not. If reserves were used, a partially faulted mapping
3341          * of the full address range.
3342          */
3343         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3344                         old_page != pagecache_page)
3345                 outside_reserve = 1;
3346
3347         page_cache_get(old_page);
3348
3349         /*
3350          * Drop page table lock as buddy allocator may be called. It will
3351          * be acquired again before returning to the caller, as expected.
3352          */
3353         spin_unlock(ptl);
3354         new_page = alloc_huge_page(vma, address, outside_reserve);
3355
3356         if (IS_ERR(new_page)) {
3357                 /*
3358                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3359                  * it is due to references held by a child and an insufficient
3360          * huge page pool. To guarantee the original mapper's
3361                  * reliability, unmap the page from child processes. The child
3362                  * may get SIGKILLed if it later faults.
3363                  */
3364                 if (outside_reserve) {
3365                         page_cache_release(old_page);
3366                         BUG_ON(huge_pte_none(pte));
3367                         unmap_ref_private(mm, vma, old_page, address);
3368                         BUG_ON(huge_pte_none(pte));
3369                         spin_lock(ptl);
3370                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3371                         if (likely(ptep &&
3372                                    pte_same(huge_ptep_get(ptep), pte)))
3373                                 goto retry_avoidcopy;
3374                         /*
3375                          * race occurs while re-acquiring page table
3376                          * lock, and our job is done.
3377                          */
3378                         return 0;
3379                 }
3380
3381                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3382                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3383                 goto out_release_old;
3384         }
3385
3386         /*
3387          * When the original hugepage is a shared one, it does not have
3388          * anon_vma prepared.
3389          */
3390         if (unlikely(anon_vma_prepare(vma))) {
3391                 ret = VM_FAULT_OOM;
3392                 goto out_release_all;
3393         }
3394
3395         copy_user_huge_page(new_page, old_page, address, vma,
3396                             pages_per_huge_page(h));
3397         __SetPageUptodate(new_page);
3398         set_page_huge_active(new_page);
3399
3400         mmun_start = address & huge_page_mask(h);
3401         mmun_end = mmun_start + huge_page_size(h);
3402         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3403
3404         /*
3405          * Retake the page table lock to check for racing updates
3406          * before the page tables are altered
3407          */
3408         spin_lock(ptl);
3409         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3410         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3411                 ClearPagePrivate(new_page);
3412
3413                 /* Break COW */
3414                 huge_ptep_clear_flush(vma, address, ptep);
3415                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3416                 set_huge_pte_at(mm, address, ptep,
3417                                 make_huge_pte(vma, new_page, 1));
3418                 page_remove_rmap(old_page, true);
3419                 hugepage_add_new_anon_rmap(new_page, vma, address);
3420                 /* Make the old page be freed below */
3421                 new_page = old_page;
3422         }
3423         spin_unlock(ptl);
3424         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3425 out_release_all:
3426         page_cache_release(new_page);
3427 out_release_old:
3428         page_cache_release(old_page);
3429
3430         spin_lock(ptl); /* Caller expects lock to be held */
3431         return ret;
3432 }
3433
3434 /* Return the pagecache page at a given address within a VMA */
3435 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3436                         struct vm_area_struct *vma, unsigned long address)
3437 {
3438         struct address_space *mapping;
3439         pgoff_t idx;
3440
3441         mapping = vma->vm_file->f_mapping;
3442         idx = vma_hugecache_offset(h, vma, address);
3443
3444         return find_lock_page(mapping, idx);
3445 }
3446
3447 /*
3448  * Return whether there is a pagecache page to back given address within VMA.
3449  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3450  */
3451 static bool hugetlbfs_pagecache_present(struct hstate *h,
3452                         struct vm_area_struct *vma, unsigned long address)
3453 {
3454         struct address_space *mapping;
3455         pgoff_t idx;
3456         struct page *page;
3457
3458         mapping = vma->vm_file->f_mapping;
3459         idx = vma_hugecache_offset(h, vma, address);
3460
3461         page = find_get_page(mapping, idx);
3462         if (page)
3463                 put_page(page);
3464         return page != NULL;
3465 }
3466
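/*
 * Insert a newly allocated huge page into the hugetlbfs page cache at
 * @idx.  On success the page's private (reservation) flag is cleared
 * and the inode's block count is charged for the page.
 */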
3467 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3468                            pgoff_t idx)
3469 {
3470         struct inode *inode = mapping->host;
3471         struct hstate *h = hstate_inode(inode);
3472         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3473
3474         if (err)
3475                 return err;
3476         ClearPagePrivate(page);
3477
3478         spin_lock(&inode->i_lock);
3479         inode->i_blocks += blocks_per_huge_page(h);
3480         spin_unlock(&inode->i_lock);
3481         return 0;
3482 }
3483
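/*
 * Handle a fault on a huge PTE that is not yet populated: find the page
 * in the page cache or allocate a new one, add it to the page cache
 * (shared mappings) or the anon rmap (private mappings), and install
 * the PTE.  Called from hugetlb_fault() with the fault mutex held.
 */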
3484 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3485                            struct address_space *mapping, pgoff_t idx,
3486                            unsigned long address, pte_t *ptep, unsigned int flags)
3487 {
3488         struct hstate *h = hstate_vma(vma);
3489         int ret = VM_FAULT_SIGBUS;
3490         int anon_rmap = 0;
3491         unsigned long size;
3492         struct page *page;
3493         pte_t new_pte;
3494         spinlock_t *ptl;
3495
3496         /*
3497          * Currently, we are forced to kill the process in the event the
3498          * original mapper has unmapped pages from the child due to a failed
3499          * COW. Warn that such a situation has occurred as it may not be obvious.
3500          */
3501         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3502                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3503                            current->pid);
3504                 return ret;
3505         }
3506
3507         /*
3508          * Use page lock to guard against racing truncation
3509          * before we get page_table_lock.
3510          */
3511 retry:
3512         page = find_lock_page(mapping, idx);
3513         if (!page) {
3514                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3515                 if (idx >= size)
3516                         goto out;
3517                 page = alloc_huge_page(vma, address, 0);
3518                 if (IS_ERR(page)) {
3519                         ret = PTR_ERR(page);
3520                         if (ret == -ENOMEM)
3521                                 ret = VM_FAULT_OOM;
3522                         else
3523                                 ret = VM_FAULT_SIGBUS;
3524                         goto out;
3525                 }
3526                 clear_huge_page(page, address, pages_per_huge_page(h));
3527                 __SetPageUptodate(page);
3528                 set_page_huge_active(page);
3529
3530                 if (vma->vm_flags & VM_MAYSHARE) {
3531                         int err = huge_add_to_page_cache(page, mapping, idx);
3532                         if (err) {
3533                                 put_page(page);
3534                                 if (err == -EEXIST)
3535                                         goto retry;
3536                                 goto out;
3537                         }
3538                 } else {
3539                         lock_page(page);
3540                         if (unlikely(anon_vma_prepare(vma))) {
3541                                 ret = VM_FAULT_OOM;
3542                                 goto backout_unlocked;
3543                         }
3544                         anon_rmap = 1;
3545                 }
3546         } else {
3547                 /*
3548                  * If a memory error occurs between mmap() and fault, some processes
3549                  * don't have a hwpoisoned swap entry for the errored virtual address.
3550                  * So we need to block hugepage fault by PG_hwpoison bit check.
3551                  */
3552                 if (unlikely(PageHWPoison(page))) {
3553                         ret = VM_FAULT_HWPOISON |
3554                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3555                         goto backout_unlocked;
3556                 }
3557         }
3558
3559         /*
3560          * If we are going to COW a private mapping later, we examine the
3561          * pending reservations for this page now. This will ensure that
3562          * any allocations necessary to record that reservation occur outside
3563          * the spinlock.
3564          */
3565         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3566                 if (vma_needs_reservation(h, vma, address) < 0) {
3567                         ret = VM_FAULT_OOM;
3568                         goto backout_unlocked;
3569                 }
3570                 /* Just decrements count, does not deallocate */
3571                 vma_end_reservation(h, vma, address);
3572         }
3573
3574         ptl = huge_pte_lockptr(h, mm, ptep);
3575         spin_lock(ptl);
3576         size = i_size_read(mapping->host) >> huge_page_shift(h);
3577         if (idx >= size)
3578                 goto backout;
3579
3580         ret = 0;
3581         if (!huge_pte_none(huge_ptep_get(ptep)))
3582                 goto backout;
3583
3584         if (anon_rmap) {
3585                 ClearPagePrivate(page);
3586                 hugepage_add_new_anon_rmap(page, vma, address);
3587         } else
3588                 page_dup_rmap(page, true);
3589         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3590                                 && (vma->vm_flags & VM_SHARED)));
3591         set_huge_pte_at(mm, address, ptep, new_pte);
3592
3593         hugetlb_count_add(pages_per_huge_page(h), mm);
3594         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3595                 /* Optimization, do the COW without a second fault */
3596                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3597         }
3598
3599         spin_unlock(ptl);
3600         unlock_page(page);
3601 out:
3602         return ret;
3603
3604 backout:
3605         spin_unlock(ptl);
3606 backout_unlocked:
3607         unlock_page(page);
3608         put_page(page);
3609         goto out;
3610 }
3611
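/*
 * Pick the fault mutex for a faulting page.  Shared mappings hash on
 * (mapping, index) and private mappings on (mm, huge page address), so
 * all faults on the same page contend on the same mutex.
 */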
3612 #ifdef CONFIG_SMP
3613 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3614                             struct vm_area_struct *vma,
3615                             struct address_space *mapping,
3616                             pgoff_t idx, unsigned long address)
3617 {
3618         unsigned long key[2];
3619         u32 hash;
3620
3621         if (vma->vm_flags & VM_SHARED) {
3622                 key[0] = (unsigned long) mapping;
3623                 key[1] = idx;
3624         } else {
3625                 key[0] = (unsigned long) mm;
3626                 key[1] = address >> huge_page_shift(h);
3627         }
3628
3629         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3630
3631         return hash & (num_fault_mutexes - 1);
3632 }
3633 #else
3634 /*
3635  * For uniprocessor systems we always use a single mutex, so just
3636  * return 0 and avoid the hashing overhead.
3637  */
3638 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3639                             struct vm_area_struct *vma,
3640                             struct address_space *mapping,
3641                             pgoff_t idx, unsigned long address)
3642 {
3643         return 0;
3644 }
3645 #endif
3646
3647 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3648                         unsigned long address, unsigned int flags)
3649 {
3650         pte_t *ptep, entry;
3651         spinlock_t *ptl;
3652         int ret;
3653         u32 hash;
3654         pgoff_t idx;
3655         struct page *page = NULL;
3656         struct page *pagecache_page = NULL;
3657         struct hstate *h = hstate_vma(vma);
3658         struct address_space *mapping;
3659         int need_wait_lock = 0;
3660
3661         address &= huge_page_mask(h);
3662
3663         ptep = huge_pte_offset(mm, address);
3664         if (ptep) {
3665                 entry = huge_ptep_get(ptep);
3666                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3667                         migration_entry_wait_huge(vma, mm, ptep);
3668                         return 0;
3669                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3670                         return VM_FAULT_HWPOISON_LARGE |
3671                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3672         } else {
3673                 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3674                 if (!ptep)
3675                         return VM_FAULT_OOM;
3676         }
3677
3678         mapping = vma->vm_file->f_mapping;
3679         idx = vma_hugecache_offset(h, vma, address);
3680
3681         /*
3682          * Serialize hugepage allocation and instantiation, so that we don't
3683          * get spurious allocation failures if two CPUs race to instantiate
3684          * the same page in the page cache.
3685          */
3686         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3687         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3688
3689         entry = huge_ptep_get(ptep);
3690         if (huge_pte_none(entry)) {
3691                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3692                 goto out_mutex;
3693         }
3694
3695         ret = 0;
3696
3697         /*
3698          * entry could be a migration/hwpoison entry at this point, so this
3699          * check prevents the kernel from going below assuming that we have
3700          * an active hugepage in pagecache. This goto expects the 2nd page fault,
3701          * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3702          * handle it.
3703          */
3704         if (!pte_present(entry))
3705                 goto out_mutex;
3706
3707         /*
3708          * If we are going to COW the mapping later, we examine the pending
3709          * reservations for this page now. This will ensure that any
3710          * allocations necessary to record that reservation occur outside the
3711          * spinlock. For private mappings, we also lookup the pagecache
3712          * page now as it is used to determine if a reservation has been
3713          * consumed.
3714          */
3715         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3716                 if (vma_needs_reservation(h, vma, address) < 0) {
3717                         ret = VM_FAULT_OOM;
3718                         goto out_mutex;
3719                 }
3720                 /* Just decrements count, does not deallocate */
3721                 vma_end_reservation(h, vma, address);
3722
3723                 if (!(vma->vm_flags & VM_MAYSHARE))
3724                         pagecache_page = hugetlbfs_pagecache_page(h,
3725                                                                 vma, address);
3726         }
3727
3728         ptl = huge_pte_lock(h, mm, ptep);
3729
3730         /* Check for a racing update before calling hugetlb_cow */
3731         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3732                 goto out_ptl;
3733
3734         /*
3735          * hugetlb_cow() requires page locks of pte_page(entry) and
3736          * pagecache_page, so here we need to take the former one
3737          * when page != pagecache_page or !pagecache_page.
3738          */
3739         page = pte_page(entry);
3740         if (page != pagecache_page)
3741                 if (!trylock_page(page)) {
3742                         need_wait_lock = 1;
3743                         goto out_ptl;
3744                 }
3745
3746         get_page(page);
3747
3748         if (flags & FAULT_FLAG_WRITE) {
3749                 if (!huge_pte_write(entry)) {
3750                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3751                                         pagecache_page, ptl);
3752                         goto out_put_page;
3753                 }
3754                 entry = huge_pte_mkdirty(entry);
3755         }
3756         entry = pte_mkyoung(entry);
3757         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3758                                                 flags & FAULT_FLAG_WRITE))
3759                 update_mmu_cache(vma, address, ptep);
3760 out_put_page:
3761         if (page != pagecache_page)
3762                 unlock_page(page);
3763         put_page(page);
3764 out_ptl:
3765         spin_unlock(ptl);
3766
3767         if (pagecache_page) {
3768                 unlock_page(pagecache_page);
3769                 put_page(pagecache_page);
3770         }
3771 out_mutex:
3772         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3773         /*
3774          * Generally it's safe to hold a refcount while waiting for the page
3775          * lock. But here we just wait to defer the next page fault, avoiding a
3776          * busy loop, and the page is not used after being unlocked before we
3777          * return from the current page fault. So we are safe from accessing a
3778          * freed page, even if we wait here without taking a refcount.
3779          */
3780         if (need_wait_lock)
3781                 wait_on_page_locked(page);
3782         return ret;
3783 }
3784
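/*
 * get_user_pages() helper for hugetlb VMAs: walk the range starting at
 * *position, faulting in huge pages as needed, and fill @pages/@vmas
 * one base page at a time.  On return *position and *nr_pages reflect
 * how far the walk got; the return value is the number of base pages
 * processed, or -EFAULT if none were.
 */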
3785 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3786                          struct page **pages, struct vm_area_struct **vmas,
3787                          unsigned long *position, unsigned long *nr_pages,
3788                          long i, unsigned int flags)
3789 {
3790         unsigned long pfn_offset;
3791         unsigned long vaddr = *position;
3792         unsigned long remainder = *nr_pages;
3793         struct hstate *h = hstate_vma(vma);
3794
3795         while (vaddr < vma->vm_end && remainder) {
3796                 pte_t *pte;
3797                 spinlock_t *ptl = NULL;
3798                 int absent;
3799                 struct page *page;
3800
3801                 /*
3802                  * If we have a pending SIGKILL, don't keep faulting pages and
3803                  * potentially allocating memory.
3804                  */
3805                 if (unlikely(fatal_signal_pending(current))) {
3806                         remainder = 0;
3807                         break;
3808                 }
3809
3810                 /*
3811                  * Some archs (sparc64, sh*) have multiple pte_ts to
3812                  * each hugepage.  We have to make sure we get the
3813                  * first, for the page indexing below to work.
3814                  *
3815                  * Note that page table lock is not held when pte is null.
3816                  */
3817                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3818                 if (pte)
3819                         ptl = huge_pte_lock(h, mm, pte);
3820                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3821
3822                 /*
3823                  * When coredumping, it suits get_dump_page if we just return
3824                  * an error where there's an empty slot with no huge pagecache
3825                  * to back it.  This way, we avoid allocating a hugepage, and
3826                  * the sparse dumpfile avoids allocating disk blocks, but its
3827                  * huge holes still show up with zeroes where they need to be.
3828                  */
3829                 if (absent && (flags & FOLL_DUMP) &&
3830                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3831                         if (pte)
3832                                 spin_unlock(ptl);
3833                         remainder = 0;
3834                         break;
3835                 }
3836
3837                 /*
3838                  * We need to call hugetlb_fault for both hugepages under migration
3839                  * (in which case hugetlb_fault waits for the migration) and
3840                  * hwpoisoned hugepages (in which case we need to prevent the
3841                  * caller from accessing them). In order to do this, we use
3842                  * is_swap_pte here instead of is_hugetlb_entry_migration and
3843                  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3844                  * both cases, and because we can't follow correct pages
3845                  * directly from any kind of swap entry.
3846                  */
3847                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3848                     ((flags & FOLL_WRITE) &&
3849                       !huge_pte_write(huge_ptep_get(pte)))) {
3850                         int ret;
3851
3852                         if (pte)
3853                                 spin_unlock(ptl);
3854                         ret = hugetlb_fault(mm, vma, vaddr,
3855                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3856                         if (!(ret & VM_FAULT_ERROR))
3857                                 continue;
3858
3859                         remainder = 0;
3860                         break;
3861                 }
3862
3863                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3864                 page = pte_page(huge_ptep_get(pte));
3865 same_page:
3866                 if (pages) {
3867                         pages[i] = mem_map_offset(page, pfn_offset);
3868                         get_page(pages[i]);
3869                 }
3870
3871                 if (vmas)
3872                         vmas[i] = vma;
3873
3874                 vaddr += PAGE_SIZE;
3875                 ++pfn_offset;
3876                 --remainder;
3877                 ++i;
3878                 if (vaddr < vma->vm_end && remainder &&
3879                                 pfn_offset < pages_per_huge_page(h)) {
3880                         /*
3881                          * We use pfn_offset to avoid touching the pageframes
3882                          * of this compound page.
3883                          */
3884                         goto same_page;
3885                 }
3886                 spin_unlock(ptl);
3887         }
3888         *nr_pages = remainder;
3889         *position = vaddr;
3890
3891         return i ? i : -EFAULT;
3892 }
3893
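/*
 * Apply @newprot to every huge PTE of @vma in [address, end).  Shared
 * PMD page tables are unshared rather than modified in place, and write
 * migration entries encountered along the way are downgraded to read
 * entries.  Returns the number of base pages affected.
 */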
3894 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3895                 unsigned long address, unsigned long end, pgprot_t newprot)
3896 {
3897         struct mm_struct *mm = vma->vm_mm;
3898         unsigned long start = address;
3899         pte_t *ptep;
3900         pte_t pte;
3901         struct hstate *h = hstate_vma(vma);
3902         unsigned long pages = 0;
3903
3904         BUG_ON(address >= end);
3905         flush_cache_range(vma, address, end);
3906
3907         mmu_notifier_invalidate_range_start(mm, start, end);
3908         i_mmap_lock_write(vma->vm_file->f_mapping);
3909         for (; address < end; address += huge_page_size(h)) {
3910                 spinlock_t *ptl;
3911                 ptep = huge_pte_offset(mm, address);
3912                 if (!ptep)
3913                         continue;
3914                 ptl = huge_pte_lock(h, mm, ptep);
3915                 if (huge_pmd_unshare(mm, &address, ptep)) {
3916                         pages++;
3917                         spin_unlock(ptl);
3918                         continue;
3919                 }
3920                 pte = huge_ptep_get(ptep);
3921                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3922                         spin_unlock(ptl);
3923                         continue;
3924                 }
3925                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3926                         swp_entry_t entry = pte_to_swp_entry(pte);
3927
3928                         if (is_write_migration_entry(entry)) {
3929                                 pte_t newpte;
3930
3931                                 make_migration_entry_read(&entry);
3932                                 newpte = swp_entry_to_pte(entry);
3933                                 set_huge_pte_at(mm, address, ptep, newpte);
3934                                 pages++;
3935                         }
3936                         spin_unlock(ptl);
3937                         continue;
3938                 }
3939                 if (!huge_pte_none(pte)) {
3940                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3941                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3942                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3943                         set_huge_pte_at(mm, address, ptep, pte);
3944                         pages++;
3945                 }
3946                 spin_unlock(ptl);
3947         }
3948         /*
3949          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3950          * may have cleared our pud entry and done put_page on the page table:
3951          * once we release i_mmap_rwsem, another task can do the final put_page
3952          * and that page table be reused and filled with junk.
3953          */
3954         flush_tlb_range(vma, start, end);
3955         mmu_notifier_invalidate_range(mm, start, end);
3956         i_mmap_unlock_write(vma->vm_file->f_mapping);
3957         mmu_notifier_invalidate_range_end(mm, start, end);
3958
3959         return pages << h->order;
3960 }
3961
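/*
 * Reserve huge pages for the file range [from, to).  Shared mappings
 * charge the inode's reservation map; private mappings get their own
 * map and reserve the full range.  The subpool and the global free pool
 * are charged for the net number of pages needed.  Returns 0 on success
 * or a negative errno.
 */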
3962 int hugetlb_reserve_pages(struct inode *inode,
3963                                         long from, long to,
3964                                         struct vm_area_struct *vma,
3965                                         vm_flags_t vm_flags)
3966 {
3967         long ret, chg;
3968         struct hstate *h = hstate_inode(inode);
3969         struct hugepage_subpool *spool = subpool_inode(inode);
3970         struct resv_map *resv_map;
3971         long gbl_reserve;
3972
3973         /*
3974          * Only apply hugepage reservation if asked. At fault time, an
3975          * attempt will be made for VM_NORESERVE to allocate a page
3976          * without using reserves
3977          */
3978         if (vm_flags & VM_NORESERVE)
3979                 return 0;
3980
3981         /*
3982          * Shared mappings base their reservation on the number of pages that
3983          * are already allocated on behalf of the file. Private mappings need
3984          * to reserve the full area even if read-only as mprotect() may be
3985          * called to make the mapping read-write. Assume !vma is a shm mapping
3986          */
3987         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3988                 resv_map = inode_resv_map(inode);
3989
3990                 chg = region_chg(resv_map, from, to);
3991
3992         } else {
3993                 resv_map = resv_map_alloc();
3994                 if (!resv_map)
3995                         return -ENOMEM;
3996
3997                 chg = to - from;
3998
3999                 set_vma_resv_map(vma, resv_map);
4000                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4001         }
4002
4003         if (chg < 0) {
4004                 ret = chg;
4005                 goto out_err;
4006         }
4007
4008         /*
4009          * There must be enough pages in the subpool for the mapping. If
4010          * the subpool has a minimum size, there may be some global
4011          * reservations already in place (gbl_reserve).
4012          */
4013         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4014         if (gbl_reserve < 0) {
4015                 ret = -ENOSPC;
4016                 goto out_err;
4017         }
4018
4019         /*
4020          * Check that enough hugepages are available for the reservation.
4021          * Hand the pages back to the subpool if there are not enough.
4022          */
4023         ret = hugetlb_acct_memory(h, gbl_reserve);
4024         if (ret < 0) {
4025                 /* put back original number of pages, chg */
4026                 (void)hugepage_subpool_put_pages(spool, chg);
4027                 goto out_err;
4028         }
4029
4030         /*
4031          * Account for the reservations made. Shared mappings record regions
4032          * that have reservations as they are shared by multiple VMAs.
4033          * When the last VMA disappears, the region map says how much
4034          * the reservation was and the page cache tells how much of
4035          * the reservation was consumed. Private mappings are per-VMA and
4036          * only the consumed reservations are tracked. When the VMA
4037          * disappears, the original reservation is the VMA size and the
4038          * consumed reservations are stored in the map. Hence, nothing
4039          * else has to be done for private mappings here
4040          */
4041         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4042                 long add = region_add(resv_map, from, to);
4043
4044                 if (unlikely(chg > add)) {
4045                         /*
4046                          * pages in this range were added to the reserve
4047                          * map between region_chg and region_add.  This
4048                          * indicates a race with alloc_huge_page.  Adjust
4049                          * the subpool and reserve counts modified above
4050                          * based on the difference.
4051                          */
4052                         long rsv_adjust;
4053
4054                         rsv_adjust = hugepage_subpool_put_pages(spool,
4055                                                                 chg - add);
4056                         hugetlb_acct_memory(h, -rsv_adjust);
4057                 }
4058         }
4059         return 0;
4060 out_err:
4061         if (!vma || vma->vm_flags & VM_MAYSHARE)
4062                 region_abort(resv_map, from, to);
4063         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4064                 kref_put(&resv_map->refs, resv_map_release);
4065         return ret;
4066 }
4067
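/*
 * Undo reservations for the file range [start, end).  @freed is the
 * number of huge pages actually removed from the page cache; the inode
 * block count is reduced accordingly and the remaining unused
 * reservation is handed back to the subpool and the global pool.
 */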
4068 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4069                                                                 long freed)
4070 {
4071         struct hstate *h = hstate_inode(inode);
4072         struct resv_map *resv_map = inode_resv_map(inode);
4073         long chg = 0;
4074         struct hugepage_subpool *spool = subpool_inode(inode);
4075         long gbl_reserve;
4076
4077         if (resv_map) {
4078                 chg = region_del(resv_map, start, end);
4079                 /*
4080                  * region_del() can fail in the rare case where a region
4081                  * must be split and another region descriptor can not be
4082                  * allocated.  If end == LONG_MAX, it will not fail.
4083                  */
4084                 if (chg < 0)
4085                         return chg;
4086         }
4087
4088         spin_lock(&inode->i_lock);
4089         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4090         spin_unlock(&inode->i_lock);
4091
4092         /*
4093          * If the subpool has a minimum size, the number of global
4094          * reservations to be released may be adjusted.
4095          */
4096         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4097         hugetlb_acct_memory(h, -gbl_reserve);
4098
4099         return 0;
4100 }
4101
4102 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
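/*
 * For two VMAs of the same mapping, work out whether the PUD-sized
 * region covering @addr can share a PMD page table: both VMAs must map
 * the same file offsets at the same PMD index with compatible flags and
 * fully cover the PUD-aligned range.  Returns the address in @svma that
 * corresponds to @addr, or 0 if sharing is not possible.
 */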
4103 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4104                                 struct vm_area_struct *vma,
4105                                 unsigned long addr, pgoff_t idx)
4106 {
4107         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4108                                 svma->vm_start;
4109         unsigned long sbase = saddr & PUD_MASK;
4110         unsigned long s_end = sbase + PUD_SIZE;
4111
4112         /* Allow segments to share if only one is marked locked */
4113         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4114         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4115
4116         /*
4117          * match the virtual addresses, permission and the alignment of the
4118          * page table page.
4119          */
4120         if (pmd_index(addr) != pmd_index(saddr) ||
4121             vm_flags != svm_flags ||
4122             sbase < svma->vm_start || svma->vm_end < s_end)
4123                 return 0;
4124
4125         return saddr;
4126 }
4127
4128 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4129 {
4130         unsigned long base = addr & PUD_MASK;
4131         unsigned long end = base + PUD_SIZE;
4132
4133         /*
4134          * check on proper vm_flags and page table alignment
4135          */
4136         if (vma->vm_flags & VM_MAYSHARE &&
4137             vma->vm_start <= base && end <= vma->vm_end)
4138                 return true;
4139         return false;
4140 }
4141
4142 /*
4143  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4144  * and returns the corresponding pte. While this is not necessary for the
4145  * !shared pmd case because we can allocate the pmd later as well, it makes the
4146  * code much cleaner. pmd allocation is essential for the shared case because
4147  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4148  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4149  * bad pmd for sharing.
4150  */
4151 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4152 {
4153         struct vm_area_struct *vma = find_vma(mm, addr);
4154         struct address_space *mapping = vma->vm_file->f_mapping;
4155         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4156                         vma->vm_pgoff;
4157         struct vm_area_struct *svma;
4158         unsigned long saddr;
4159         pte_t *spte = NULL;
4160         pte_t *pte;
4161         spinlock_t *ptl;
4162
4163         if (!vma_shareable(vma, addr))
4164                 return (pte_t *)pmd_alloc(mm, pud, addr);
4165
4166         i_mmap_lock_write(mapping);
4167         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4168                 if (svma == vma)
4169                         continue;
4170
4171                 saddr = page_table_shareable(svma, vma, addr, idx);
4172                 if (saddr) {
4173                         spte = huge_pte_offset(svma->vm_mm, saddr);
4174                         if (spte) {
4175                                 mm_inc_nr_pmds(mm);
4176                                 get_page(virt_to_page(spte));
4177                                 break;
4178                         }
4179                 }
4180         }
4181
4182         if (!spte)
4183                 goto out;
4184
4185         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4186         spin_lock(ptl);
4187         if (pud_none(*pud)) {
4188                 pud_populate(mm, pud,
4189                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4190         } else {
4191                 put_page(virt_to_page(spte));
4192                 mm_inc_nr_pmds(mm);
4193         }
4194         spin_unlock(ptl);
4195 out:
4196         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4197         i_mmap_unlock_write(mapping);
4198         return pte;
4199 }
4200
4201 /*
4202  * unmap huge page backed by shared pte.
4203  *
4204  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
4205  * indicated by page_count > 1, unmap is achieved by clearing pud and
4206  * decrementing the ref count. If count == 1, the pte page is not shared.
4207  *
4208  * called with page table lock held.
4209  *
4210  * returns: 1 successfully unmapped a shared pte page
4211  *          0 the underlying pte page is not shared, or it is the last user
4212  */
4213 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4214 {
4215         pgd_t *pgd = pgd_offset(mm, *addr);
4216         pud_t *pud = pud_offset(pgd, *addr);
4217
4218         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4219         if (page_count(virt_to_page(ptep)) == 1)
4220                 return 0;
4221
4222         pud_clear(pud);
4223         put_page(virt_to_page(ptep));
4224         mm_dec_nr_pmds(mm);
4225         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4226         return 1;
4227 }
4228 #define want_pmd_share()        (1)
4229 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4230 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4231 {
4232         return NULL;
4233 }
4234
4235 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4236 {
4237         return 0;
4238 }
4239 #define want_pmd_share()        (0)
4240 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4241
4242 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
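/*
 * Generic allocation and lookup of huge PTEs for architectures where
 * huge pages sit directly at the PMD or PUD level of the regular page
 * tables.
 */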
4243 pte_t *huge_pte_alloc(struct mm_struct *mm,
4244                         unsigned long addr, unsigned long sz)
4245 {
4246         pgd_t *pgd;
4247         pud_t *pud;
4248         pte_t *pte = NULL;
4249
4250         pgd = pgd_offset(mm, addr);
4251         pud = pud_alloc(mm, pgd, addr);
4252         if (pud) {
4253                 if (sz == PUD_SIZE) {
4254                         pte = (pte_t *)pud;
4255                 } else {
4256                         BUG_ON(sz != PMD_SIZE);
4257                         if (want_pmd_share() && pud_none(*pud))
4258                                 pte = huge_pmd_share(mm, addr, pud);
4259                         else
4260                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4261                 }
4262         }
4263         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4264
4265         return pte;
4266 }
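
/*
 * For illustration, the fault path is the main consumer of the helper
 * above; hugetlb_fault() earlier in this file does, roughly:
 *
 *      ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 *      if (!ptep)
 *              return VM_FAULT_OOM;
 *
 * For PUD_SIZE pages the pud entry itself doubles as the "pte"; for
 * PMD_SIZE pages a pmd page is allocated, or shared via huge_pmd_share()
 * when the architecture opts in with CONFIG_ARCH_WANT_HUGE_PMD_SHARE.
 */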
4267
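/*
 * Look up the page table entry for a huge page mapping at @addr: the pud is
 * returned directly when it holds a huge (PUD-sized) mapping, otherwise the
 * pmd entry is returned; NULL means an intermediate level is not present.
 */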
4268 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4269 {
4270         pgd_t *pgd;
4271         pud_t *pud;
4272         pmd_t *pmd = NULL;
4273
4274         pgd = pgd_offset(mm, addr);
4275         if (pgd_present(*pgd)) {
4276                 pud = pud_offset(pgd, addr);
4277                 if (pud_present(*pud)) {
4278                         if (pud_huge(*pud))
4279                                 return (pte_t *)pud;
4280                         pmd = pmd_offset(pud, addr);
4281                 }
4282         }
4283         return (pte_t *)pmd;
4284 }
4285
4286 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4287
4288 /*
4289  * These functions are overridable (declared __weak) if your architecture
4290  * needs to provide its own behavior.
4291  */
4292 struct page * __weak
4293 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4294                               int write)
4295 {
4296         return ERR_PTR(-EINVAL);
4297 }
4298
4299 struct page * __weak
4300 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4301                 pmd_t *pmd, int flags)
4302 {
4303         struct page *page = NULL;
4304         spinlock_t *ptl;
4305 retry:
4306         ptl = pmd_lockptr(mm, pmd);
4307         spin_lock(ptl);
4308         /*
4309          * Make sure that the address range covered by this pmd is not
4310          * unmapped by other threads while we look at it.
4311          */
4312         if (!pmd_huge(*pmd))
4313                 goto out;
4314         if (pmd_present(*pmd)) {
4315                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4316                 if (flags & FOLL_GET)
4317                         get_page(page);
4318         } else {
4319                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4320                         spin_unlock(ptl);
4321                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4322                         goto retry;
4323                 }
4324                 /*
4325                  * hwpoisoned entry is treated as no_page_table in
4326                  * follow_page_mask().
4327                  */
4328         }
4329 out:
4330         spin_unlock(ptl);
4331         return page;
4332 }
4333
4334 struct page * __weak
4335 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4336                 pud_t *pud, int flags)
4337 {
4338         if (flags & FOLL_GET)
4339                 return NULL;
4340
4341         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4342 }
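
/*
 * For context, follow_page_mask() in mm/gup.c is the expected caller of the
 * three __weak helpers above; schematically (not verbatim):
 *
 *      if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 *              page = follow_huge_pud(mm, address, pud, flags);
 *              return page ? page : no_page_table(vma, flags);
 *      }
 *      ...
 *      if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
 *              page = follow_huge_pmd(mm, address, pmd, flags);
 *              return page ? page : no_page_table(vma, flags);
 *      }
 *
 * follow_huge_addr() is tried first by that caller; the generic __weak
 * version returns -EINVAL so the normal page table walk proceeds, and only
 * architectures with special huge page addressing override it.
 */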
4343
4344 #ifdef CONFIG_MEMORY_FAILURE
4345
4346 /*
4347  * This function is called from memory failure code.
4348  * The caller is assumed to hold the page lock of the head page.
4349  */
4350 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4351 {
4352         struct hstate *h = page_hstate(hpage);
4353         int nid = page_to_nid(hpage);
4354         int ret = -EBUSY;
4355
4356         spin_lock(&hugetlb_lock);
4357         /*
4358          * Just checking !page_huge_active is not enough, because that could be
4359          * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4360          */
4361         if (!page_huge_active(hpage) && !page_count(hpage)) {
4362                 /*
4363                  * A hwpoisoned hugepage isn't linked to the activelist or freelist,
4364                  * but a dangling hpage->lru can trigger list-debug warnings
4365                  * (this happens when we call unpoison_memory() on it),
4366                  * so make it point to itself with list_del_init().
4367                  */
4368                 list_del_init(&hpage->lru);
4369                 set_page_refcounted(hpage);
4370                 h->free_huge_pages--;
4371                 h->free_huge_pages_node[nid]--;
4372                 ret = 0;
4373         }
4374         spin_unlock(&hugetlb_lock);
4375         return ret;
4376 }
4377 #endif
4378
4379 bool isolate_huge_page(struct page *page, struct list_head *list)
4380 {
4381         bool ret = true;
4382
4383         VM_BUG_ON_PAGE(!PageHead(page), page);
4384         spin_lock(&hugetlb_lock);
4385         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4386                 ret = false;
4387                 goto unlock;
4388         }
4389         clear_page_huge_active(page);
4390         list_move_tail(&page->lru, list);
4391 unlock:
4392         spin_unlock(&hugetlb_lock);
4393         return ret;
4394 }
4395
4396 void putback_active_hugepage(struct page *page)
4397 {
4398         VM_BUG_ON_PAGE(!PageHead(page), page);
4399         spin_lock(&hugetlb_lock);
4400         set_page_huge_active(page);
4401         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4402         spin_unlock(&hugetlb_lock);
4403         put_page(page);
4404 }
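
/*
 * For illustration, isolate_huge_page() and putback_active_hugepage() are
 * used as a pair by page migration.  A simplified (not verbatim) caller,
 * modelled on memory hotplug's do_migrate_range(), looks like:
 *
 *      LIST_HEAD(pagelist);
 *
 *      if (PageHuge(page))
 *              isolate_huge_page(compound_head(page), &pagelist);
 *      ...
 *      // 'alloc_new_page' stands in for the caller's allocation callback
 *      ret = migrate_pages(&pagelist, alloc_new_page, NULL, 0,
 *                              MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *      if (ret)
 *              putback_movable_pages(&pagelist);
 *
 * Migration (or putback_movable_pages() on failure) hands hugepages back
 * through putback_active_hugepage() above, which re-marks them active and
 * drops the reference taken at isolation time.
 */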