mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37
38 unsigned long hugepages_treat_as_movable;
39
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43
44 __initdata LIST_HEAD(huge_boot_pages);
45
46 /* for command line parsing */
47 static struct hstate * __initdata parsed_hstate;
48 static unsigned long __initdata default_hstate_max_huge_pages;
49 static unsigned long __initdata default_hstate_size;
50
51 /*
52  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
53  * free_huge_pages, and surplus_huge_pages.
54  */
55 DEFINE_SPINLOCK(hugetlb_lock);
56
57 /*
58  * Serializes faults on the same logical page.  This is used to
59  * prevent spurious OOMs when the hugepage pool is fully utilized.
60  */
61 static int num_fault_mutexes;
62 static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
63
64 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
65 {
66         bool free = (spool->count == 0) && (spool->used_hpages == 0);
67
68         spin_unlock(&spool->lock);
69
70         /* If no pages are used and no other handles to the subpool
71          * remain, free the subpool. */
72         if (free)
73                 kfree(spool);
74 }
75
76 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
77 {
78         struct hugepage_subpool *spool;
79
80         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
81         if (!spool)
82                 return NULL;
83
84         spin_lock_init(&spool->lock);
85         spool->count = 1;
86         spool->max_hpages = nr_blocks;
87         spool->used_hpages = 0;
88
89         return spool;
90 }
91
92 void hugepage_put_subpool(struct hugepage_subpool *spool)
93 {
94         spin_lock(&spool->lock);
95         BUG_ON(!spool->count);
96         spool->count--;
97         unlock_or_release_subpool(spool);
98 }
99
100 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
101                                       long delta)
102 {
103         int ret = 0;
104
105         if (!spool)
106                 return 0;
107
108         spin_lock(&spool->lock);
109         if ((spool->used_hpages + delta) <= spool->max_hpages) {
110                 spool->used_hpages += delta;
111         } else {
112                 ret = -ENOMEM;
113         }
114         spin_unlock(&spool->lock);
115
116         return ret;
117 }
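
/*
 * Illustrative accounting example (editorial note, not part of the
 * original source): with max_hpages == 2 and used_hpages == 2, a further
 * hugepage_subpool_get_pages(spool, 1) fails with -ENOMEM, while
 * hugepage_subpool_put_pages(spool, 1) makes room for one page again.
 */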
118
119 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
120                                        long delta)
121 {
122         if (!spool)
123                 return;
124
125         spin_lock(&spool->lock);
126         spool->used_hpages -= delta;
127         /* If hugetlbfs_put_super couldn't free spool due to
128          * an outstanding quota reference, free it now. */
129         unlock_or_release_subpool(spool);
130 }
131
132 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
133 {
134         return HUGETLBFS_SB(inode->i_sb)->spool;
135 }
136
137 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
138 {
139         return subpool_inode(file_inode(vma->vm_file));
140 }
141
142 /*
143  * Region tracking -- allows tracking of reservations and instantiated pages
144  *                    across the pages in a mapping.
145  *
146  * The region data structures are embedded into a resv_map and
147  * protected by a resv_map's lock
148  */
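/*
 * Worked example (illustrative): suppose the map already holds regions
 * [0,2) and [5,7).  Then region_chg(resv, 1, 6) returns 3, since only
 * pages 2, 3 and 4 are not yet covered, and a subsequent
 * region_add(resv, 1, 6) merges everything into the single region [0,7).
 */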
149 struct file_region {
150         struct list_head link;
151         long from;
152         long to;
153 };
154
155 static long region_add(struct resv_map *resv, long f, long t)
156 {
157         struct list_head *head = &resv->regions;
158         struct file_region *rg, *nrg, *trg;
159
160         spin_lock(&resv->lock);
161         /* Locate the region we are either in or before. */
162         list_for_each_entry(rg, head, link)
163                 if (f <= rg->to)
164                         break;
165
166         /* Round our left edge to the current segment if it encloses us. */
167         if (f > rg->from)
168                 f = rg->from;
169
170         /* Check for and consume any regions we now overlap with. */
171         nrg = rg;
172         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
173                 if (&rg->link == head)
174                         break;
175                 if (rg->from > t)
176                         break;
177
178                 /* If this area reaches higher, then extend our area to
179                  * include it completely.  If this is not the first area
180                  * which we intend to reuse, free it. */
181                 if (rg->to > t)
182                         t = rg->to;
183                 if (rg != nrg) {
184                         list_del(&rg->link);
185                         kfree(rg);
186                 }
187         }
188         nrg->from = f;
189         nrg->to = t;
190         spin_unlock(&resv->lock);
191         return 0;
192 }
193
194 static long region_chg(struct resv_map *resv, long f, long t)
195 {
196         struct list_head *head = &resv->regions;
197         struct file_region *rg, *nrg = NULL;
198         long chg = 0;
199
200 retry:
201         spin_lock(&resv->lock);
202         /* Locate the region we are before or in. */
203         list_for_each_entry(rg, head, link)
204                 if (f <= rg->to)
205                         break;
206
207         /* If we are below the current region then a new region is required.
208          * This is subtle: allocate a new region at the position but make
209          * it zero size, so that we can guarantee to record the reservation. */
210         if (&rg->link == head || t < rg->from) {
211                 if (!nrg) {
212                         spin_unlock(&resv->lock);
213                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
214                         if (!nrg)
215                                 return -ENOMEM;
216
217                         nrg->from = f;
218                         nrg->to   = f;
219                         INIT_LIST_HEAD(&nrg->link);
220                         goto retry;
221                 }
222
223                 list_add(&nrg->link, rg->link.prev);
224                 chg = t - f;
225                 goto out_nrg;
226         }
227
228         /* Round our left edge to the current segment if it encloses us. */
229         if (f > rg->from)
230                 f = rg->from;
231         chg = t - f;
232
233         /* Check for and consume any regions we now overlap with. */
234         list_for_each_entry(rg, rg->link.prev, link) {
235                 if (&rg->link == head)
236                         break;
237                 if (rg->from > t)
238                         goto out;
239
240                 /* We overlap with this area; if it extends further than
241                  * us then we must extend ourselves.  Account for its
242                  * existing reservation. */
243                 if (rg->to > t) {
244                         chg += rg->to - t;
245                         t = rg->to;
246                 }
247                 chg -= rg->to - rg->from;
248         }
249
250 out:
251         spin_unlock(&resv->lock);
252         /* We already know we raced and no longer need the new region */
253         kfree(nrg);
254         return chg;
255 out_nrg:
256         spin_unlock(&resv->lock);
257         return chg;
258 }
259
260 static long region_truncate(struct resv_map *resv, long end)
261 {
262         struct list_head *head = &resv->regions;
263         struct file_region *rg, *trg;
264         long chg = 0;
265
266         spin_lock(&resv->lock);
267         /* Locate the region we are either in or before. */
268         list_for_each_entry(rg, head, link)
269                 if (end <= rg->to)
270                         break;
271         if (&rg->link == head)
272                 goto out;
273
274         /* If we are in the middle of a region then adjust it. */
275         if (end > rg->from) {
276                 chg = rg->to - end;
277                 rg->to = end;
278                 rg = list_entry(rg->link.next, typeof(*rg), link);
279         }
280
281         /* Drop any remaining regions. */
282         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
283                 if (&rg->link == head)
284                         break;
285                 chg += rg->to - rg->from;
286                 list_del(&rg->link);
287                 kfree(rg);
288         }
289
290 out:
291         spin_unlock(&resv->lock);
292         return chg;
293 }
294
295 static long region_count(struct resv_map *resv, long f, long t)
296 {
297         struct list_head *head = &resv->regions;
298         struct file_region *rg;
299         long chg = 0;
300
301         spin_lock(&resv->lock);
302         /* Locate each segment we overlap with, and count that overlap. */
303         list_for_each_entry(rg, head, link) {
304                 long seg_from;
305                 long seg_to;
306
307                 if (rg->to <= f)
308                         continue;
309                 if (rg->from >= t)
310                         break;
311
312                 seg_from = max(rg->from, f);
313                 seg_to = min(rg->to, t);
314
315                 chg += seg_to - seg_from;
316         }
317         spin_unlock(&resv->lock);
318
319         return chg;
320 }
321
322 /*
323  * Convert the address within this vma to the page offset within
324  * the mapping, in pagecache page units; huge pages here.
325  */
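/*
 * Worked example (illustrative, assuming a 2MB hstate, i.e.
 * huge_page_shift() == 21 and huge_page_order() == 9): a fault 4MB past
 * vma->vm_start in a vma with vm_pgoff == 512 (a 2MB file offset in 4KB
 * pages) yields index (4MB >> 21) + (512 >> 9) == 2 + 1 == 3.
 */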
326 static pgoff_t vma_hugecache_offset(struct hstate *h,
327                         struct vm_area_struct *vma, unsigned long address)
328 {
329         return ((address - vma->vm_start) >> huge_page_shift(h)) +
330                         (vma->vm_pgoff >> huge_page_order(h));
331 }
332
333 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
334                                      unsigned long address)
335 {
336         return vma_hugecache_offset(hstate_vma(vma), vma, address);
337 }
338
339 /*
340  * Return the size of the pages allocated when backing a VMA. In the majority
341  * of cases this will be the same size as used by the page table entries.
342  */
343 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
344 {
345         struct hstate *hstate;
346
347         if (!is_vm_hugetlb_page(vma))
348                 return PAGE_SIZE;
349
350         hstate = hstate_vma(vma);
351
352         return 1UL << huge_page_shift(hstate);
353 }
354 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
355
356 /*
357  * Return the page size being used by the MMU to back a VMA. In the majority
358  * of cases, the page size used by the kernel matches the MMU size. On
359  * architectures where it differs, an architecture-specific version of this
360  * function is required.
361  */
362 #ifndef vma_mmu_pagesize
363 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
364 {
365         return vma_kernel_pagesize(vma);
366 }
367 #endif
368
369 /*
370  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
371  * bits of the reservation map pointer, which are always clear due to
372  * alignment.
373  */
374 #define HPAGE_RESV_OWNER    (1UL << 0)
375 #define HPAGE_RESV_UNMAPPED (1UL << 1)
376 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
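/*
 * Illustrative sketch of the encoding (editorial note): a kmalloc()ed
 * resv_map is at least word aligned, so its two low bits are zero and
 * vm_private_data can carry both the pointer and the flags, e.g.:
 *
 *      vma->vm_private_data = (void *)((unsigned long)map | HPAGE_RESV_OWNER);
 *
 * which is what set_vma_resv_map()/set_vma_resv_flags() below implement.
 */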
377
378 /*
379  * These helpers are used to track how many pages are reserved for
380  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
381  * is guaranteed to have its future faults succeed.
382  *
383  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
384  * the reserve counters are updated with the hugetlb_lock held. It is safe
385  * to reset the VMA at fork() time as it is not in use yet and there is no
386  * chance of the global counters getting corrupted as a result of the values.
387  *
388  * The private mapping reservation is represented in a subtly different
389  * manner to a shared mapping.  A shared mapping has a region map associated
390  * with the underlying file; this region map represents the backing file
391  * pages which have ever had a reservation assigned, and it persists even
392  * after the page is instantiated.  A private mapping has a region map
393  * associated with the original mmap which is attached to all VMAs that
394  * reference it; this region map represents those offsets which have
395  * consumed reservation, i.e. where pages have been instantiated.
396  */
397 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
398 {
399         return (unsigned long)vma->vm_private_data;
400 }
401
402 static void set_vma_private_data(struct vm_area_struct *vma,
403                                                         unsigned long value)
404 {
405         vma->vm_private_data = (void *)value;
406 }
407
408 struct resv_map *resv_map_alloc(void)
409 {
410         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
411         if (!resv_map)
412                 return NULL;
413
414         kref_init(&resv_map->refs);
415         spin_lock_init(&resv_map->lock);
416         INIT_LIST_HEAD(&resv_map->regions);
417
418         return resv_map;
419 }
420
421 void resv_map_release(struct kref *ref)
422 {
423         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
424
425         /* Clear out any active regions before we release the map. */
426         region_truncate(resv_map, 0);
427         kfree(resv_map);
428 }
429
430 static inline struct resv_map *inode_resv_map(struct inode *inode)
431 {
432         return inode->i_mapping->private_data;
433 }
434
435 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
436 {
437         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
438         if (vma->vm_flags & VM_MAYSHARE) {
439                 struct address_space *mapping = vma->vm_file->f_mapping;
440                 struct inode *inode = mapping->host;
441
442                 return inode_resv_map(inode);
443
444         } else {
445                 return (struct resv_map *)(get_vma_private_data(vma) &
446                                                         ~HPAGE_RESV_MASK);
447         }
448 }
449
450 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
451 {
452         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
453         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
454
455         set_vma_private_data(vma, (get_vma_private_data(vma) &
456                                 HPAGE_RESV_MASK) | (unsigned long)map);
457 }
458
459 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
460 {
461         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
462         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
463
464         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
465 }
466
467 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
468 {
469         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
470
471         return (get_vma_private_data(vma) & flag) != 0;
472 }
473
474 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
475 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
476 {
477         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
478         if (!(vma->vm_flags & VM_MAYSHARE))
479                 vma->vm_private_data = (void *)0;
480 }
481
482 /* Returns true if the VMA has associated reserve pages */
483 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
484 {
485         if (vma->vm_flags & VM_NORESERVE) {
486                 /*
487                  * This address is already reserved by another process
488                  * (chg == 0), so we should decrement the reserved count.
489                  * Without decrementing, the reserve count remains after the
490                  * inode is released, because the allocated page goes into the
491                  * page cache and is regarded as coming from the reserved pool
492                  * when it is freed.  Currently we have no better way to deal
493                  * with this situation properly, so add a work-around here.
494                  */
495                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
496                         return 1;
497                 else
498                         return 0;
499         }
500
501         /* Shared mappings always use reserves */
502         if (vma->vm_flags & VM_MAYSHARE)
503                 return 1;
504
505         /*
506          * Only the process that called mmap() has reserves for
507          * private mappings.
508          */
509         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
510                 return 1;
511
512         return 0;
513 }
514
515 static void enqueue_huge_page(struct hstate *h, struct page *page)
516 {
517         int nid = page_to_nid(page);
518         list_move(&page->lru, &h->hugepage_freelists[nid]);
519         h->free_huge_pages++;
520         h->free_huge_pages_node[nid]++;
521 }
522
523 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
524 {
525         struct page *page;
526
527         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
528                 if (!is_migrate_isolate_page(page))
529                         break;
530         /*
531          * If no non-isolated free hugepage is found on the list,
532          * the allocation fails.
533          */
534         if (&h->hugepage_freelists[nid] == &page->lru)
535                 return NULL;
536         list_move(&page->lru, &h->hugepage_activelist);
537         set_page_refcounted(page);
538         h->free_huge_pages--;
539         h->free_huge_pages_node[nid]--;
540         return page;
541 }
542
543 /* Movability of hugepages depends on migration support. */
544 static inline gfp_t htlb_alloc_mask(struct hstate *h)
545 {
546         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
547                 return GFP_HIGHUSER_MOVABLE;
548         else
549                 return GFP_HIGHUSER;
550 }
551
552 static struct page *dequeue_huge_page_vma(struct hstate *h,
553                                 struct vm_area_struct *vma,
554                                 unsigned long address, int avoid_reserve,
555                                 long chg)
556 {
557         struct page *page = NULL;
558         struct mempolicy *mpol;
559         nodemask_t *nodemask;
560         struct zonelist *zonelist;
561         struct zone *zone;
562         struct zoneref *z;
563         unsigned int cpuset_mems_cookie;
564
565         /*
566          * A child process with MAP_PRIVATE mappings created by its parent
567          * has no page reserves. This check ensures that reservations are
568          * not "stolen". The child may still get SIGKILLed.
569          */
570         if (!vma_has_reserves(vma, chg) &&
571                         h->free_huge_pages - h->resv_huge_pages == 0)
572                 goto err;
573
574         /* If reserves cannot be used, ensure enough pages are in the pool */
575         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
576                 goto err;
577
578 retry_cpuset:
579         cpuset_mems_cookie = read_mems_allowed_begin();
580         zonelist = huge_zonelist(vma, address,
581                                         htlb_alloc_mask(h), &mpol, &nodemask);
582
583         for_each_zone_zonelist_nodemask(zone, z, zonelist,
584                                                 MAX_NR_ZONES - 1, nodemask) {
585                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
586                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
587                         if (page) {
588                                 if (avoid_reserve)
589                                         break;
590                                 if (!vma_has_reserves(vma, chg))
591                                         break;
592
593                                 SetPagePrivate(page);
594                                 h->resv_huge_pages--;
595                                 break;
596                         }
597                 }
598         }
599
600         mpol_cond_put(mpol);
601         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
602                 goto retry_cpuset;
603         return page;
604
605 err:
606         return NULL;
607 }
608
609 /*
610  * common helper functions for hstate_next_node_to_{alloc|free}.
611  * We may have allocated or freed a huge page based on a different
612  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
613  * be outside of *nodes_allowed.  Ensure that we use an allowed
614  * node for alloc or free.
615  */
616 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
617 {
618         nid = next_node(nid, *nodes_allowed);
619         if (nid == MAX_NUMNODES)
620                 nid = first_node(*nodes_allowed);
621         VM_BUG_ON(nid >= MAX_NUMNODES);
622
623         return nid;
624 }
625
626 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
627 {
628         if (!node_isset(nid, *nodes_allowed))
629                 nid = next_node_allowed(nid, nodes_allowed);
630         return nid;
631 }
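
/*
 * Worked example (illustrative): with *nodes_allowed == {0, 2} and
 * nid == 1, node 1 is not in the mask, so next_node_allowed() advances
 * to node 2; from node 2 a further call wraps back around to node 0.
 */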
632
633 /*
634  * returns the previously saved node ["this node"] from which to
635  * allocate a persistent huge page for the pool and advance the
636  * next node from which to allocate, handling wrap at end of node
637  * mask.
638  */
639 static int hstate_next_node_to_alloc(struct hstate *h,
640                                         nodemask_t *nodes_allowed)
641 {
642         int nid;
643
644         VM_BUG_ON(!nodes_allowed);
645
646         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
647         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
648
649         return nid;
650 }
651
652 /*
653  * helper for free_pool_huge_page() - return the previously saved
654  * node ["this node"] from which to free a huge page.  Advance the
655  * next node id whether or not we find a free huge page to free so
656  * that the next attempt to free addresses the next node.
657  */
658 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
659 {
660         int nid;
661
662         VM_BUG_ON(!nodes_allowed);
663
664         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
665         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
666
667         return nid;
668 }
669
670 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
671         for (nr_nodes = nodes_weight(*mask);                            \
672                 nr_nodes > 0 &&                                         \
673                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
674                 nr_nodes--)
675
676 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
677         for (nr_nodes = nodes_weight(*mask);                            \
678                 nr_nodes > 0 &&                                         \
679                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
680                 nr_nodes--)
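
/*
 * Illustrative use of the iterators above (the real callers appear
 * further down, e.g. alloc_fresh_huge_page() and free_pool_huge_page()):
 *
 *      for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *              if (try_alloc_on(node))         // try_alloc_on() is hypothetical
 *                      break;
 *      }
 *
 * Each trip hands back the next allowed node in round-robin order, and
 * nr_nodes bounds the walk to a single pass over the mask.
 */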
681
682 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
683 static void destroy_compound_gigantic_page(struct page *page,
684                                         unsigned long order)
685 {
686         int i;
687         int nr_pages = 1 << order;
688         struct page *p = page + 1;
689
690         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
691                 __ClearPageTail(p);
692                 set_page_refcounted(p);
693                 p->first_page = NULL;
694         }
695
696         set_compound_order(page, 0);
697         __ClearPageHead(page);
698 }
699
700 static void free_gigantic_page(struct page *page, unsigned order)
701 {
702         free_contig_range(page_to_pfn(page), 1 << order);
703 }
704
705 static int __alloc_gigantic_page(unsigned long start_pfn,
706                                 unsigned long nr_pages)
707 {
708         unsigned long end_pfn = start_pfn + nr_pages;
709         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
710 }
711
712 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
713                                 unsigned long nr_pages)
714 {
715         unsigned long i, end_pfn = start_pfn + nr_pages;
716         struct page *page;
717
718         for (i = start_pfn; i < end_pfn; i++) {
719                 if (!pfn_valid(i))
720                         return false;
721
722                 page = pfn_to_page(i);
723
724                 if (PageReserved(page))
725                         return false;
726
727                 if (page_count(page) > 0)
728                         return false;
729
730                 if (PageHuge(page))
731                         return false;
732         }
733
734         return true;
735 }
736
737 static bool zone_spans_last_pfn(const struct zone *zone,
738                         unsigned long start_pfn, unsigned long nr_pages)
739 {
740         unsigned long last_pfn = start_pfn + nr_pages - 1;
741         return zone_spans_pfn(zone, last_pfn);
742 }
743
744 static struct page *alloc_gigantic_page(int nid, unsigned order)
745 {
746         unsigned long nr_pages = 1 << order;
747         unsigned long ret, pfn, flags;
748         struct zone *z;
749
750         z = NODE_DATA(nid)->node_zones;
751         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
752                 spin_lock_irqsave(&z->lock, flags);
753
754                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
755                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
756                         if (pfn_range_valid_gigantic(pfn, nr_pages)) {
757                                 /*
758                                  * We release the zone lock here because
759                                  * alloc_contig_range() will also lock the zone
760                                  * at some point. If there's an allocation
761                                  * spinning on this lock, it may win the race
762                                  * and cause alloc_contig_range() to fail...
763                                  */
764                                 spin_unlock_irqrestore(&z->lock, flags);
765                                 ret = __alloc_gigantic_page(pfn, nr_pages);
766                                 if (!ret)
767                                         return pfn_to_page(pfn);
768                                 spin_lock_irqsave(&z->lock, flags);
769                         }
770                         pfn += nr_pages;
771                 }
772
773                 spin_unlock_irqrestore(&z->lock, flags);
774         }
775
776         return NULL;
777 }
778
779 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
780 static void prep_compound_gigantic_page(struct page *page, unsigned long order);
781
782 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
783 {
784         struct page *page;
785
786         page = alloc_gigantic_page(nid, huge_page_order(h));
787         if (page) {
788                 prep_compound_gigantic_page(page, huge_page_order(h));
789                 prep_new_huge_page(h, page, nid);
790         }
791
792         return page;
793 }
794
795 static int alloc_fresh_gigantic_page(struct hstate *h,
796                                 nodemask_t *nodes_allowed)
797 {
798         struct page *page = NULL;
799         int nr_nodes, node;
800
801         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
802                 page = alloc_fresh_gigantic_page_node(h, node);
803                 if (page)
804                         return 1;
805         }
806
807         return 0;
808 }
809
810 static inline bool gigantic_page_supported(void) { return true; }
811 #else
812 static inline bool gigantic_page_supported(void) { return false; }
813 static inline void free_gigantic_page(struct page *page, unsigned order) { }
814 static inline void destroy_compound_gigantic_page(struct page *page,
815                                                 unsigned long order) { }
816 static inline int alloc_fresh_gigantic_page(struct hstate *h,
817                                         nodemask_t *nodes_allowed) { return 0; }
818 #endif
819
820 static void update_and_free_page(struct hstate *h, struct page *page)
821 {
822         int i;
823
824         if (hstate_is_gigantic(h) && !gigantic_page_supported())
825                 return;
826
827         h->nr_huge_pages--;
828         h->nr_huge_pages_node[page_to_nid(page)]--;
829         for (i = 0; i < pages_per_huge_page(h); i++) {
830                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
831                                 1 << PG_referenced | 1 << PG_dirty |
832                                 1 << PG_active | 1 << PG_private |
833                                 1 << PG_writeback);
834         }
835         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
836         set_compound_page_dtor(page, NULL);
837         set_page_refcounted(page);
838         if (hstate_is_gigantic(h)) {
839                 destroy_compound_gigantic_page(page, huge_page_order(h));
840                 free_gigantic_page(page, huge_page_order(h));
841         } else {
842                 arch_release_hugepage(page);
843                 __free_pages(page, huge_page_order(h));
844         }
845 }
846
847 struct hstate *size_to_hstate(unsigned long size)
848 {
849         struct hstate *h;
850
851         for_each_hstate(h) {
852                 if (huge_page_size(h) == size)
853                         return h;
854         }
855         return NULL;
856 }
857
858 void free_huge_page(struct page *page)
859 {
860         /*
861          * Can't pass hstate in here because it is called from the
862          * compound page destructor.
863          */
864         struct hstate *h = page_hstate(page);
865         int nid = page_to_nid(page);
866         struct hugepage_subpool *spool =
867                 (struct hugepage_subpool *)page_private(page);
868         bool restore_reserve;
869
870         set_page_private(page, 0);
871         page->mapping = NULL;
872         BUG_ON(page_count(page));
873         BUG_ON(page_mapcount(page));
874         restore_reserve = PagePrivate(page);
875         ClearPagePrivate(page);
876
877         spin_lock(&hugetlb_lock);
878         hugetlb_cgroup_uncharge_page(hstate_index(h),
879                                      pages_per_huge_page(h), page);
880         if (restore_reserve)
881                 h->resv_huge_pages++;
882
883         if (h->surplus_huge_pages_node[nid]) {
884                 /* remove the page from active list */
885                 list_del(&page->lru);
886                 update_and_free_page(h, page);
887                 h->surplus_huge_pages--;
888                 h->surplus_huge_pages_node[nid]--;
889         } else {
890                 arch_clear_hugepage_flags(page);
891                 enqueue_huge_page(h, page);
892         }
893         spin_unlock(&hugetlb_lock);
894         hugepage_subpool_put_pages(spool, 1);
895 }
896
897 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
898 {
899         INIT_LIST_HEAD(&page->lru);
900         set_compound_page_dtor(page, free_huge_page);
901         spin_lock(&hugetlb_lock);
902         set_hugetlb_cgroup(page, NULL);
903         h->nr_huge_pages++;
904         h->nr_huge_pages_node[nid]++;
905         spin_unlock(&hugetlb_lock);
906         put_page(page); /* free it into the hugepage allocator */
907 }
908
909 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
910 {
911         int i;
912         int nr_pages = 1 << order;
913         struct page *p = page + 1;
914
915         /* we rely on prep_new_huge_page to set the destructor */
916         set_compound_order(page, order);
917         __SetPageHead(page);
918         __ClearPageReserved(page);
919         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
920                 __SetPageTail(p);
921                 /*
922                  * For gigantic hugepages allocated through bootmem at
923                  * boot, it's safer to be consistent with the not-gigantic
924                  * hugepages and clear the PG_reserved bit from all tail pages
925                  * too.  Otherwise drivers using get_user_pages() to access tail
926                  * pages may get the reference counting wrong if they see
927                  * PG_reserved set on a tail page (despite the head page not
928                  * having PG_reserved set).  Enforcing this consistency between
929                  * head and tail pages allows drivers to optimize away a check
930                  * on the head page when they need to know if put_page() is needed
931                  * after get_user_pages().
932                  */
933                 __ClearPageReserved(p);
934                 set_page_count(p, 0);
935                 p->first_page = page;
936         }
937 }
938
939 /*
940  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
941  * transparent huge pages.  See the PageTransHuge() documentation for more
942  * details.
943  */
944 int PageHuge(struct page *page)
945 {
946         if (!PageCompound(page))
947                 return 0;
948
949         page = compound_head(page);
950         return get_compound_page_dtor(page) == free_huge_page;
951 }
952 EXPORT_SYMBOL_GPL(PageHuge);
953
954 /*
955  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
956  * normal or transparent huge pages.
957  */
958 int PageHeadHuge(struct page *page_head)
959 {
960         if (!PageHead(page_head))
961                 return 0;
962
963         return get_compound_page_dtor(page_head) == free_huge_page;
964 }
965
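/*
 * Editorial note: __basepage_index() converts a (possibly tail) page to
 * its offset within the mapping in base-page units.  Worked example
 * (illustrative): for a 2MB head page (order 9) at huge-page index 5,
 * the third tail page maps to base-page index (5 << 9) + 3 == 2563.
 */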
966 pgoff_t __basepage_index(struct page *page)
967 {
968         struct page *page_head = compound_head(page);
969         pgoff_t index = page_index(page_head);
970         unsigned long compound_idx;
971
972         if (!PageHuge(page_head))
973                 return page_index(page);
974
975         if (compound_order(page_head) >= MAX_ORDER)
976                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
977         else
978                 compound_idx = page - page_head;
979
980         return (index << compound_order(page_head)) + compound_idx;
981 }
982
983 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
984 {
985         struct page *page;
986
987         page = alloc_pages_exact_node(nid,
988                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
989                                                 __GFP_REPEAT|__GFP_NOWARN,
990                 huge_page_order(h));
991         if (page) {
992                 if (arch_prepare_hugepage(page)) {
993                         __free_pages(page, huge_page_order(h));
994                         return NULL;
995                 }
996                 prep_new_huge_page(h, page, nid);
997         }
998
999         return page;
1000 }
1001
1002 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1003 {
1004         struct page *page;
1005         int nr_nodes, node;
1006         int ret = 0;
1007
1008         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1009                 page = alloc_fresh_huge_page_node(h, node);
1010                 if (page) {
1011                         ret = 1;
1012                         break;
1013                 }
1014         }
1015
1016         if (ret)
1017                 count_vm_event(HTLB_BUDDY_PGALLOC);
1018         else
1019                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1020
1021         return ret;
1022 }
1023
1024 /*
1025  * Free huge page from pool from next node to free.
1026  * Attempt to keep persistent huge pages more or less
1027  * balanced over allowed nodes.
1028  * Called with hugetlb_lock locked.
1029  */
1030 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1031                                                          bool acct_surplus)
1032 {
1033         int nr_nodes, node;
1034         int ret = 0;
1035
1036         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1037                 /*
1038                  * If we're returning unused surplus pages, only examine
1039                  * nodes with surplus pages.
1040                  */
1041                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1042                     !list_empty(&h->hugepage_freelists[node])) {
1043                         struct page *page =
1044                                 list_entry(h->hugepage_freelists[node].next,
1045                                           struct page, lru);
1046                         list_del(&page->lru);
1047                         h->free_huge_pages--;
1048                         h->free_huge_pages_node[node]--;
1049                         if (acct_surplus) {
1050                                 h->surplus_huge_pages--;
1051                                 h->surplus_huge_pages_node[node]--;
1052                         }
1053                         update_and_free_page(h, page);
1054                         ret = 1;
1055                         break;
1056                 }
1057         }
1058
1059         return ret;
1060 }
1061
1062 /*
1063  * Dissolve a given free hugepage into free buddy pages. This function does
1064  * nothing for in-use (including surplus) hugepages.
1065  */
1066 static void dissolve_free_huge_page(struct page *page)
1067 {
1068         spin_lock(&hugetlb_lock);
1069         if (PageHuge(page) && !page_count(page)) {
1070                 struct hstate *h = page_hstate(page);
1071                 int nid = page_to_nid(page);
1072                 list_del(&page->lru);
1073                 h->free_huge_pages--;
1074                 h->free_huge_pages_node[nid]--;
1075                 update_and_free_page(h, page);
1076         }
1077         spin_unlock(&hugetlb_lock);
1078 }
1079
1080 /*
1081  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1082  * make specified memory blocks removable from the system.
1083  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1084  */
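/*
 * Illustrative example (editorial note): with 2MB (order 9) and 1GB
 * (order 18) hstates registered, the loop below settles on order 9, so
 * the pfn range is scanned in 2MB (1 << 9 pfn) steps.
 */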
1085 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1086 {
1087         unsigned int order = 8 * sizeof(void *);
1088         unsigned long pfn;
1089         struct hstate *h;
1090
1091         if (!hugepages_supported())
1092                 return;
1093
1094         /* Set scan step to minimum hugepage size */
1095         for_each_hstate(h)
1096                 if (order > huge_page_order(h))
1097                         order = huge_page_order(h);
1098         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
1099         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
1100                 dissolve_free_huge_page(pfn_to_page(pfn));
1101 }
1102
1103 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1104 {
1105         struct page *page;
1106         unsigned int r_nid;
1107
1108         if (hstate_is_gigantic(h))
1109                 return NULL;
1110
1111         /*
1112          * Assume we will successfully allocate the surplus page to
1113          * prevent racing processes from causing the surplus to exceed
1114          * overcommit
1115          *
1116          * This however introduces a different race, where a process B
1117          * tries to grow the static hugepage pool while alloc_pages() is
1118          * called by process A. B will only examine the per-node
1119          * counters in determining if surplus huge pages can be
1120          * converted to normal huge pages in adjust_pool_surplus(). A
1121          * won't be able to increment the per-node counter, until the
1122          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1123          * no more huge pages can be converted from surplus to normal
1124          * state (and doesn't try to convert again). Thus, we have a
1125          * case where a surplus huge page exists, the pool is grown, and
1126          * the surplus huge page still exists after, even though it
1127          * should just have been converted to a normal huge page. This
1128          * does not leak memory, though, as the hugepage will be freed
1129          * once it is out of use. It also does not allow the counters to
1130          * go out of whack in adjust_pool_surplus() as we don't modify
1131          * the node values until we've gotten the hugepage and only the
1132          * per-node value is checked there.
1133          */
1134         spin_lock(&hugetlb_lock);
1135         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1136                 spin_unlock(&hugetlb_lock);
1137                 return NULL;
1138         } else {
1139                 h->nr_huge_pages++;
1140                 h->surplus_huge_pages++;
1141         }
1142         spin_unlock(&hugetlb_lock);
1143
1144         if (nid == NUMA_NO_NODE)
1145                 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1146                                    __GFP_REPEAT|__GFP_NOWARN,
1147                                    huge_page_order(h));
1148         else
1149                 page = alloc_pages_exact_node(nid,
1150                         htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1151                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1152
1153         if (page && arch_prepare_hugepage(page)) {
1154                 __free_pages(page, huge_page_order(h));
1155                 page = NULL;
1156         }
1157
1158         spin_lock(&hugetlb_lock);
1159         if (page) {
1160                 INIT_LIST_HEAD(&page->lru);
1161                 r_nid = page_to_nid(page);
1162                 set_compound_page_dtor(page, free_huge_page);
1163                 set_hugetlb_cgroup(page, NULL);
1164                 /*
1165                  * We incremented the global counters already
1166                  */
1167                 h->nr_huge_pages_node[r_nid]++;
1168                 h->surplus_huge_pages_node[r_nid]++;
1169                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1170         } else {
1171                 h->nr_huge_pages--;
1172                 h->surplus_huge_pages--;
1173                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1174         }
1175         spin_unlock(&hugetlb_lock);
1176
1177         return page;
1178 }
1179
1180 /*
1181  * This allocation function is useful in the context where vma is irrelevant.
1182  * E.g. soft-offlining uses this function because it only cares about the
1183  * physical address of the error page.
1184  */
1185 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1186 {
1187         struct page *page = NULL;
1188
1189         spin_lock(&hugetlb_lock);
1190         if (h->free_huge_pages - h->resv_huge_pages > 0)
1191                 page = dequeue_huge_page_node(h, nid);
1192         spin_unlock(&hugetlb_lock);
1193
1194         if (!page)
1195                 page = alloc_buddy_huge_page(h, nid);
1196
1197         return page;
1198 }
1199
1200 /*
1201  * Increase the hugetlb pool such that it can accommodate a reservation
1202  * of size 'delta'.
1203  */
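/*
 * Worked example of the 'needed' arithmetic below (illustrative): with
 * resv_huge_pages == 3, free_huge_pages == 4 and delta == 2, needed is
 * (3 + 2) - 4 == 1, so one surplus page must be allocated before the
 * reservation can be committed.
 */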
1204 static int gather_surplus_pages(struct hstate *h, int delta)
1205 {
1206         struct list_head surplus_list;
1207         struct page *page, *tmp;
1208         int ret, i;
1209         int needed, allocated;
1210         bool alloc_ok = true;
1211
1212         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1213         if (needed <= 0) {
1214                 h->resv_huge_pages += delta;
1215                 return 0;
1216         }
1217
1218         allocated = 0;
1219         INIT_LIST_HEAD(&surplus_list);
1220
1221         ret = -ENOMEM;
1222 retry:
1223         spin_unlock(&hugetlb_lock);
1224         for (i = 0; i < needed; i++) {
1225                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1226                 if (!page) {
1227                         alloc_ok = false;
1228                         break;
1229                 }
1230                 list_add(&page->lru, &surplus_list);
1231         }
1232         allocated += i;
1233
1234         /*
1235          * After retaking hugetlb_lock, we need to recalculate 'needed'
1236          * because either resv_huge_pages or free_huge_pages may have changed.
1237          */
1238         spin_lock(&hugetlb_lock);
1239         needed = (h->resv_huge_pages + delta) -
1240                         (h->free_huge_pages + allocated);
1241         if (needed > 0) {
1242                 if (alloc_ok)
1243                         goto retry;
1244                 /*
1245                  * We were not able to allocate enough pages to
1246                  * satisfy the entire reservation so we free what
1247                  * we've allocated so far.
1248                  */
1249                 goto free;
1250         }
1251         /*
1252          * The surplus_list now contains _at_least_ the number of extra pages
1253          * needed to accommodate the reservation.  Add the appropriate number
1254          * of pages to the hugetlb pool and free the extras back to the buddy
1255          * allocator.  Commit the entire reservation here to prevent another
1256          * process from stealing the pages as they are added to the pool but
1257          * before they are reserved.
1258          */
1259         needed += allocated;
1260         h->resv_huge_pages += delta;
1261         ret = 0;
1262
1263         /* Free the needed pages to the hugetlb pool */
1264         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1265                 if ((--needed) < 0)
1266                         break;
1267                 /*
1268                  * This page is now managed by the hugetlb allocator and has
1269                  * no users -- drop the buddy allocator's reference.
1270                  */
1271                 put_page_testzero(page);
1272                 VM_BUG_ON_PAGE(page_count(page), page);
1273                 enqueue_huge_page(h, page);
1274         }
1275 free:
1276         spin_unlock(&hugetlb_lock);
1277
1278         /* Free unnecessary surplus pages to the buddy allocator */
1279         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1280                 put_page(page);
1281         spin_lock(&hugetlb_lock);
1282
1283         return ret;
1284 }
1285
1286 /*
1287  * When releasing a hugetlb pool reservation, any surplus pages that were
1288  * allocated to satisfy the reservation must be explicitly freed if they were
1289  * never used.
1290  * Called with hugetlb_lock held.
1291  */
1292 static void return_unused_surplus_pages(struct hstate *h,
1293                                         unsigned long unused_resv_pages)
1294 {
1295         unsigned long nr_pages;
1296
1297         /* Uncommit the reservation */
1298         h->resv_huge_pages -= unused_resv_pages;
1299
1300         /* Cannot return gigantic pages currently */
1301         if (hstate_is_gigantic(h))
1302                 return;
1303
1304         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1305
1306         /*
1307          * We want to release as many surplus pages as possible, spread
1308          * evenly across all nodes with memory. Iterate across these nodes
1309          * until we can no longer free unreserved surplus pages. This occurs
1310          * when the nodes with surplus pages have no free pages.
1311  * free_pool_huge_page() will balance the freed pages across the
1312          * on-line nodes with memory and will handle the hstate accounting.
1313          */
1314         while (nr_pages--) {
1315                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1316                         break;
1317                 cond_resched_lock(&hugetlb_lock);
1318         }
1319 }
1320
1321 /*
1322  * Determine if the huge page at addr within the vma has an associated
1323  * reservation.  Where it does not, we will need to logically increase
1324  * reservation and actually increase subpool usage before an allocation
1325  * can occur.  Where any new reservation would be required, the
1326  * reservation change is prepared but not committed.  Once the page
1327  * has been allocated from the subpool and instantiated, the change should
1328  * be committed via vma_commit_reservation().  No action is required on
1329  * failure.
1330  */
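/*
 * Typical call sequence (see alloc_huge_page() below): the allocation
 * path first calls vma_needs_reservation(), charges the subpool and the
 * hugetlb cgroup, allocates the page, and only then calls
 * vma_commit_reservation() to make the region entry permanent.
 */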
1331 static long vma_needs_reservation(struct hstate *h,
1332                         struct vm_area_struct *vma, unsigned long addr)
1333 {
1334         struct resv_map *resv;
1335         pgoff_t idx;
1336         long chg;
1337
1338         resv = vma_resv_map(vma);
1339         if (!resv)
1340                 return 1;
1341
1342         idx = vma_hugecache_offset(h, vma, addr);
1343         chg = region_chg(resv, idx, idx + 1);
1344
1345         if (vma->vm_flags & VM_MAYSHARE)
1346                 return chg;
1347         else
1348                 return chg < 0 ? chg : 0;
1349 }
1350 static void vma_commit_reservation(struct hstate *h,
1351                         struct vm_area_struct *vma, unsigned long addr)
1352 {
1353         struct resv_map *resv;
1354         pgoff_t idx;
1355
1356         resv = vma_resv_map(vma);
1357         if (!resv)
1358                 return;
1359
1360         idx = vma_hugecache_offset(h, vma, addr);
1361         region_add(resv, idx, idx + 1);
1362 }
1363
1364 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1365                                     unsigned long addr, int avoid_reserve)
1366 {
1367         struct hugepage_subpool *spool = subpool_vma(vma);
1368         struct hstate *h = hstate_vma(vma);
1369         struct page *page;
1370         long chg;
1371         int ret, idx;
1372         struct hugetlb_cgroup *h_cg;
1373
1374         idx = hstate_index(h);
1375         /*
1376          * Processes that did not create the mapping will have no
1377          * reserves and will not have accounted against the subpool
1378          * limit. Check that the subpool limit can be made before
1379          * satisfying the allocation.  MAP_NORESERVE mappings may also
1380          * need pages and a subpool limit allocation if no reserve
1381          * mapping overlaps.
1382          */
1383         chg = vma_needs_reservation(h, vma, addr);
1384         if (chg < 0)
1385                 return ERR_PTR(-ENOMEM);
1386         if (chg || avoid_reserve)
1387                 if (hugepage_subpool_get_pages(spool, 1))
1388                         return ERR_PTR(-ENOSPC);
1389
1390         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1391         if (ret)
1392                 goto out_subpool_put;
1393
1394         spin_lock(&hugetlb_lock);
1395         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1396         if (!page) {
1397                 spin_unlock(&hugetlb_lock);
1398                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1399                 if (!page)
1400                         goto out_uncharge_cgroup;
1401
1402                 spin_lock(&hugetlb_lock);
1403                 list_move(&page->lru, &h->hugepage_activelist);
1404                 /* Fall through */
1405         }
1406         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1407         spin_unlock(&hugetlb_lock);
1408
1409         set_page_private(page, (unsigned long)spool);
1410
1411         vma_commit_reservation(h, vma, addr);
1412         return page;
1413
1414 out_uncharge_cgroup:
1415         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1416 out_subpool_put:
1417         if (chg || avoid_reserve)
1418                 hugepage_subpool_put_pages(spool, 1);
1419         return ERR_PTR(-ENOSPC);
1420 }
1421
1422 /*
1423  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1424  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1425  * where no ERR_PTR() value is expected to be returned.
1426  */
1427 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1428                                 unsigned long addr, int avoid_reserve)
1429 {
1430         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1431         if (IS_ERR(page))
1432                 page = NULL;
1433         return page;
1434 }
1435
1436 int __weak alloc_bootmem_huge_page(struct hstate *h)
1437 {
1438         struct huge_bootmem_page *m;
1439         int nr_nodes, node;
1440
1441         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1442                 void *addr;
1443
1444                 addr = memblock_virt_alloc_try_nid_nopanic(
1445                                 huge_page_size(h), huge_page_size(h),
1446                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1447                 if (addr) {
1448                         /*
1449                          * Use the beginning of the huge page to store the
1450                          * huge_bootmem_page struct (until gather_bootmem
1451                          * puts them into the mem_map).
1452                          */
1453                         m = addr;
1454                         goto found;
1455                 }
1456         }
1457         return 0;
1458
1459 found:
1460         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1461         /* Put them into a private list first because mem_map is not up yet */
1462         list_add(&m->list, &huge_boot_pages);
1463         m->hstate = h;
1464         return 1;
1465 }
1466
1467 static void __init prep_compound_huge_page(struct page *page, int order)
1468 {
1469         if (unlikely(order > (MAX_ORDER - 1)))
1470                 prep_compound_gigantic_page(page, order);
1471         else
1472                 prep_compound_page(page, order);
1473 }
1474
1475 /* Put bootmem huge pages into the standard lists after mem_map is up */
1476 static void __init gather_bootmem_prealloc(void)
1477 {
1478         struct huge_bootmem_page *m;
1479
1480         list_for_each_entry(m, &huge_boot_pages, list) {
1481                 struct hstate *h = m->hstate;
1482                 struct page *page;
1483
1484 #ifdef CONFIG_HIGHMEM
1485                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1486                 memblock_free_late(__pa(m),
1487                                    sizeof(struct huge_bootmem_page));
1488 #else
1489                 page = virt_to_page(m);
1490 #endif
1491                 WARN_ON(page_count(page) != 1);
1492                 prep_compound_huge_page(page, h->order);
1493                 WARN_ON(PageReserved(page));
1494                 prep_new_huge_page(h, page, page_to_nid(page));
1495                 /*
1496                  * If we had gigantic hugepages allocated at boot time, we need
1497                  * to restore the 'stolen' pages to totalram_pages in order to
1498                  * fix confusing memory reports from free(1) and other
1499                  * side effects, like CommitLimit going negative.
1500                  */
1501                 if (hstate_is_gigantic(h))
1502                         adjust_managed_page_count(page, 1 << h->order);
1503         }
1504 }
1505
1506 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1507 {
1508         unsigned long i;
1509
1510         for (i = 0; i < h->max_huge_pages; ++i) {
1511                 if (hstate_is_gigantic(h)) {
1512                         if (!alloc_bootmem_huge_page(h))
1513                                 break;
1514                 } else if (!alloc_fresh_huge_page(h,
1515                                          &node_states[N_MEMORY]))
1516                         break;
1517         }
1518         h->max_huge_pages = i;
1519 }
1520
1521 static void __init hugetlb_init_hstates(void)
1522 {
1523         struct hstate *h;
1524
1525         for_each_hstate(h) {
1526                 /* oversize (gigantic) hugepages were initialized in early boot */
1527                 if (!hstate_is_gigantic(h))
1528                         hugetlb_hstate_alloc_pages(h);
1529         }
1530 }
1531
1532 static char * __init memfmt(char *buf, unsigned long n)
1533 {
1534         if (n >= (1UL << 30))
1535                 sprintf(buf, "%lu GB", n >> 30);
1536         else if (n >= (1UL << 20))
1537                 sprintf(buf, "%lu MB", n >> 20);
1538         else
1539                 sprintf(buf, "%lu KB", n >> 10);
1540         return buf;
1541 }
1542
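/*
 * Worked example (illustrative): memfmt(buf, 1UL << 30) formats "1 GB",
 * memfmt(buf, 2UL << 20) formats "2 MB", and anything below 1 MB falls
 * through to the "KB" case, e.g. memfmt(buf, 64UL << 10) gives "64 KB".
 */
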
1543 static void __init report_hugepages(void)
1544 {
1545         struct hstate *h;
1546
1547         for_each_hstate(h) {
1548                 char buf[32];
1549                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1550                         memfmt(buf, huge_page_size(h)),
1551                         h->free_huge_pages);
1552         }
1553 }
1554
1555 #ifdef CONFIG_HIGHMEM
1556 static void try_to_free_low(struct hstate *h, unsigned long count,
1557                                                 nodemask_t *nodes_allowed)
1558 {
1559         int i;
1560
1561         if (hstate_is_gigantic(h))
1562                 return;
1563
1564         for_each_node_mask(i, *nodes_allowed) {
1565                 struct page *page, *next;
1566                 struct list_head *freel = &h->hugepage_freelists[i];
1567                 list_for_each_entry_safe(page, next, freel, lru) {
1568                         if (count >= h->nr_huge_pages)
1569                                 return;
1570                         if (PageHighMem(page))
1571                                 continue;
1572                         list_del(&page->lru);
1573                         update_and_free_page(h, page);
1574                         h->free_huge_pages--;
1575                         h->free_huge_pages_node[page_to_nid(page)]--;
1576                 }
1577         }
1578 }
1579 #else
1580 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1581                                                 nodemask_t *nodes_allowed)
1582 {
1583 }
1584 #endif
1585
1586 /*
1587  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1588  * balanced by operating on them in a round-robin fashion.
1589  * Returns 1 if an adjustment was made.
1590  */
1591 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1592                                 int delta)
1593 {
1594         int nr_nodes, node;
1595
1596         VM_BUG_ON(delta != -1 && delta != 1);
1597
1598         if (delta < 0) {
1599                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1600                         if (h->surplus_huge_pages_node[node])
1601                                 goto found;
1602                 }
1603         } else {
1604                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1605                         if (h->surplus_huge_pages_node[node] <
1606                                         h->nr_huge_pages_node[node])
1607                                 goto found;
1608                 }
1609         }
1610         return 0;
1611
1612 found:
1613         h->surplus_huge_pages += delta;
1614         h->surplus_huge_pages_node[node] += delta;
1615         return 1;
1616 }
1617
1618 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1619 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1620                                                 nodemask_t *nodes_allowed)
1621 {
1622         unsigned long min_count, ret;
1623
1624         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1625                 return h->max_huge_pages;
1626
1627         /*
1628          * Increase the pool size
1629          * First take pages out of surplus state.  Then make up the
1630          * remaining difference by allocating fresh huge pages.
1631          *
1632          * We might race with alloc_buddy_huge_page() here and be unable
1633          * to convert a surplus huge page to a normal huge page. That is
1634          * not critical, though, it just means the overall size of the
1635          * pool might be one hugepage larger than it needs to be, but
1636          * within all the constraints specified by the sysctls.
1637          */
1638         spin_lock(&hugetlb_lock);
1639         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1640                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1641                         break;
1642         }
1643
1644         while (count > persistent_huge_pages(h)) {
1645                 /*
1646                  * If this allocation races such that we no longer need the
1647                  * page, free_huge_page will handle it by freeing the page
1648                  * and reducing the surplus.
1649                  */
1650                 spin_unlock(&hugetlb_lock);
1651                 if (hstate_is_gigantic(h))
1652                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1653                 else
1654                         ret = alloc_fresh_huge_page(h, nodes_allowed);
1655                 spin_lock(&hugetlb_lock);
1656                 if (!ret)
1657                         goto out;
1658
1659                 /* Bail for signals. Probably ctrl-c from user */
1660                 if (signal_pending(current))
1661                         goto out;
1662         }
1663
1664         /*
1665          * Decrease the pool size
1666          * First return free pages to the buddy allocator (being careful
1667          * to keep enough around to satisfy reservations).  Then place
1668          * pages into surplus state as needed so the pool will shrink
1669          * to the desired size as pages become free.
1670          *
1671          * By placing pages into the surplus state independent of the
1672          * overcommit value, we are allowing the surplus pool size to
1673          * exceed overcommit. There are few sane options here. Since
1674          * alloc_buddy_huge_page() is checking the global counter,
1675          * though, we'll note that we're not allowed to exceed surplus
1676          * and won't grow the pool anywhere else. Not until one of the
1677  * sysctls is changed, or the surplus pages go out of use.
1678          */
1679         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1680         min_count = max(count, min_count);
1681         try_to_free_low(h, min_count, nodes_allowed);
1682         while (min_count < persistent_huge_pages(h)) {
1683                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1684                         break;
1685                 cond_resched_lock(&hugetlb_lock);
1686         }
1687         while (count < persistent_huge_pages(h)) {
1688                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1689                         break;
1690         }
1691 out:
1692         ret = persistent_huge_pages(h);
1693         spin_unlock(&hugetlb_lock);
1694         return ret;
1695 }
1696
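/*
 * set_max_huge_pages() is what ultimately services userspace pool resizing.
 * For example (illustrative, assuming a 2 MB default hstate), both
 *
 *	echo 512 > /proc/sys/vm/nr_hugepages
 *	echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * funnel through __nr_hugepages_store_common() below and arrive here with
 * count == 512 and nodes_allowed covering all nodes with memory.
 */
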
1697 #define HSTATE_ATTR_RO(_name) \
1698         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1699
1700 #define HSTATE_ATTR(_name) \
1701         static struct kobj_attribute _name##_attr = \
1702                 __ATTR(_name, 0644, _name##_show, _name##_store)
1703
1704 static struct kobject *hugepages_kobj;
1705 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1706
1707 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1708
1709 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1710 {
1711         int i;
1712
1713         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1714                 if (hstate_kobjs[i] == kobj) {
1715                         if (nidp)
1716                                 *nidp = NUMA_NO_NODE;
1717                         return &hstates[i];
1718                 }
1719
1720         return kobj_to_node_hstate(kobj, nidp);
1721 }
1722
1723 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1724                                         struct kobj_attribute *attr, char *buf)
1725 {
1726         struct hstate *h;
1727         unsigned long nr_huge_pages;
1728         int nid;
1729
1730         h = kobj_to_hstate(kobj, &nid);
1731         if (nid == NUMA_NO_NODE)
1732                 nr_huge_pages = h->nr_huge_pages;
1733         else
1734                 nr_huge_pages = h->nr_huge_pages_node[nid];
1735
1736         return sprintf(buf, "%lu\n", nr_huge_pages);
1737 }
1738
1739 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1740                                            struct hstate *h, int nid,
1741                                            unsigned long count, size_t len)
1742 {
1743         int err;
1744         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1745
1746         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1747                 err = -EINVAL;
1748                 goto out;
1749         }
1750
1751         if (nid == NUMA_NO_NODE) {
1752                 /*
1753                  * global hstate attribute
1754                  */
1755                 if (!(obey_mempolicy &&
1756                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1757                         NODEMASK_FREE(nodes_allowed);
1758                         nodes_allowed = &node_states[N_MEMORY];
1759                 }
1760         } else if (nodes_allowed) {
1761                 /*
1762                  * per node hstate attribute: adjust count to global,
1763                  * but restrict alloc/free to the specified node.
1764                  */
1765                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1766                 init_nodemask_of_node(nodes_allowed, nid);
1767         } else
1768                 nodes_allowed = &node_states[N_MEMORY];
1769
1770         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1771
1772         if (nodes_allowed != &node_states[N_MEMORY])
1773                 NODEMASK_FREE(nodes_allowed);
1774
1775         return len;
1776 out:
1777         NODEMASK_FREE(nodes_allowed);
1778         return err;
1779 }
1780
1781 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1782                                          struct kobject *kobj, const char *buf,
1783                                          size_t len)
1784 {
1785         struct hstate *h;
1786         unsigned long count;
1787         int nid;
1788         int err;
1789
1790         err = kstrtoul(buf, 10, &count);
1791         if (err)
1792                 return err;
1793
1794         h = kobj_to_hstate(kobj, &nid);
1795         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1796 }
1797
1798 static ssize_t nr_hugepages_show(struct kobject *kobj,
1799                                        struct kobj_attribute *attr, char *buf)
1800 {
1801         return nr_hugepages_show_common(kobj, attr, buf);
1802 }
1803
1804 static ssize_t nr_hugepages_store(struct kobject *kobj,
1805                struct kobj_attribute *attr, const char *buf, size_t len)
1806 {
1807         return nr_hugepages_store_common(false, kobj, buf, len);
1808 }
1809 HSTATE_ATTR(nr_hugepages);
1810
1811 #ifdef CONFIG_NUMA
1812
1813 /*
1814  * hstate attribute for optionally mempolicy-based constraint on persistent
1815  * huge page alloc/free.
1816  */
1817 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1818                                        struct kobj_attribute *attr, char *buf)
1819 {
1820         return nr_hugepages_show_common(kobj, attr, buf);
1821 }
1822
1823 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1824                struct kobj_attribute *attr, const char *buf, size_t len)
1825 {
1826         return nr_hugepages_store_common(true, kobj, buf, len);
1827 }
1828 HSTATE_ATTR(nr_hugepages_mempolicy);
1829 #endif
1830
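/*
 * Illustrative usage of the _mempolicy variant: perform the write under a
 * mempolicy so allocation/freeing is constrained to that policy's nodes,
 * e.g. (hypothetical node list):
 *
 *	numactl -m 0,1 sh -c \
 *	    'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * The plain nr_hugepages attribute ignores the task mempolicy and spreads
 * pages over all nodes with memory.
 */
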
1831
1832 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1833                                         struct kobj_attribute *attr, char *buf)
1834 {
1835         struct hstate *h = kobj_to_hstate(kobj, NULL);
1836         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1837 }
1838
1839 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1840                 struct kobj_attribute *attr, const char *buf, size_t count)
1841 {
1842         int err;
1843         unsigned long input;
1844         struct hstate *h = kobj_to_hstate(kobj, NULL);
1845
1846         if (hstate_is_gigantic(h))
1847                 return -EINVAL;
1848
1849         err = kstrtoul(buf, 10, &input);
1850         if (err)
1851                 return err;
1852
1853         spin_lock(&hugetlb_lock);
1854         h->nr_overcommit_huge_pages = input;
1855         spin_unlock(&hugetlb_lock);
1856
1857         return count;
1858 }
1859 HSTATE_ATTR(nr_overcommit_hugepages);
1860
1861 static ssize_t free_hugepages_show(struct kobject *kobj,
1862                                         struct kobj_attribute *attr, char *buf)
1863 {
1864         struct hstate *h;
1865         unsigned long free_huge_pages;
1866         int nid;
1867
1868         h = kobj_to_hstate(kobj, &nid);
1869         if (nid == NUMA_NO_NODE)
1870                 free_huge_pages = h->free_huge_pages;
1871         else
1872                 free_huge_pages = h->free_huge_pages_node[nid];
1873
1874         return sprintf(buf, "%lu\n", free_huge_pages);
1875 }
1876 HSTATE_ATTR_RO(free_hugepages);
1877
1878 static ssize_t resv_hugepages_show(struct kobject *kobj,
1879                                         struct kobj_attribute *attr, char *buf)
1880 {
1881         struct hstate *h = kobj_to_hstate(kobj, NULL);
1882         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1883 }
1884 HSTATE_ATTR_RO(resv_hugepages);
1885
1886 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1887                                         struct kobj_attribute *attr, char *buf)
1888 {
1889         struct hstate *h;
1890         unsigned long surplus_huge_pages;
1891         int nid;
1892
1893         h = kobj_to_hstate(kobj, &nid);
1894         if (nid == NUMA_NO_NODE)
1895                 surplus_huge_pages = h->surplus_huge_pages;
1896         else
1897                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1898
1899         return sprintf(buf, "%lu\n", surplus_huge_pages);
1900 }
1901 HSTATE_ATTR_RO(surplus_hugepages);
1902
1903 static struct attribute *hstate_attrs[] = {
1904         &nr_hugepages_attr.attr,
1905         &nr_overcommit_hugepages_attr.attr,
1906         &free_hugepages_attr.attr,
1907         &resv_hugepages_attr.attr,
1908         &surplus_hugepages_attr.attr,
1909 #ifdef CONFIG_NUMA
1910         &nr_hugepages_mempolicy_attr.attr,
1911 #endif
1912         NULL,
1913 };
1914
1915 static struct attribute_group hstate_attr_group = {
1916         .attrs = hstate_attrs,
1917 };
1918
1919 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1920                                     struct kobject **hstate_kobjs,
1921                                     struct attribute_group *hstate_attr_group)
1922 {
1923         int retval;
1924         int hi = hstate_index(h);
1925
1926         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1927         if (!hstate_kobjs[hi])
1928                 return -ENOMEM;
1929
1930         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1931         if (retval)
1932                 kobject_put(hstate_kobjs[hi]);
1933
1934         return retval;
1935 }
1936
1937 static void __init hugetlb_sysfs_init(void)
1938 {
1939         struct hstate *h;
1940         int err;
1941
1942         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1943         if (!hugepages_kobj)
1944                 return;
1945
1946         for_each_hstate(h) {
1947                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1948                                          hstate_kobjs, &hstate_attr_group);
1949                 if (err)
1950                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
1951         }
1952 }
1953
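/*
 * After hugetlb_sysfs_init(), each hstate appears under
 * /sys/kernel/mm/hugepages/, e.g. for a 2 MB hstate (paths illustrative):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */
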
1954 #ifdef CONFIG_NUMA
1955
1956 /*
1957  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1958  * with node devices in node_devices[] using a parallel array.  The array
1959  * index of a node device or node_hstate equals the node id.
1960  * This is here to avoid any static dependency of the node device driver, in
1961  * the base kernel, on the hugetlb module.
1962  */
1963 struct node_hstate {
1964         struct kobject          *hugepages_kobj;
1965         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1966 };
1967 struct node_hstate node_hstates[MAX_NUMNODES];
1968
1969 /*
1970  * A subset of global hstate attributes for node devices
1971  */
1972 static struct attribute *per_node_hstate_attrs[] = {
1973         &nr_hugepages_attr.attr,
1974         &free_hugepages_attr.attr,
1975         &surplus_hugepages_attr.attr,
1976         NULL,
1977 };
1978
1979 static struct attribute_group per_node_hstate_attr_group = {
1980         .attrs = per_node_hstate_attrs,
1981 };
1982
1983 /*
1984  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1985  * Returns node id via non-NULL nidp.
1986  */
1987 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1988 {
1989         int nid;
1990
1991         for (nid = 0; nid < nr_node_ids; nid++) {
1992                 struct node_hstate *nhs = &node_hstates[nid];
1993                 int i;
1994                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1995                         if (nhs->hstate_kobjs[i] == kobj) {
1996                                 if (nidp)
1997                                         *nidp = nid;
1998                                 return &hstates[i];
1999                         }
2000         }
2001
2002         BUG();
2003         return NULL;
2004 }
2005
2006 /*
2007  * Unregister hstate attributes from a single node device.
2008  * No-op if no hstate attributes attached.
2009  */
2010 static void hugetlb_unregister_node(struct node *node)
2011 {
2012         struct hstate *h;
2013         struct node_hstate *nhs = &node_hstates[node->dev.id];
2014
2015         if (!nhs->hugepages_kobj)
2016                 return;         /* no hstate attributes */
2017
2018         for_each_hstate(h) {
2019                 int idx = hstate_index(h);
2020                 if (nhs->hstate_kobjs[idx]) {
2021                         kobject_put(nhs->hstate_kobjs[idx]);
2022                         nhs->hstate_kobjs[idx] = NULL;
2023                 }
2024         }
2025
2026         kobject_put(nhs->hugepages_kobj);
2027         nhs->hugepages_kobj = NULL;
2028 }
2029
2030 /*
2031  * hugetlb module exit:  unregister hstate attributes from node devices
2032  * that have them.
2033  */
2034 static void hugetlb_unregister_all_nodes(void)
2035 {
2036         int nid;
2037
2038         /*
2039          * disable node device registrations.
2040          */
2041         register_hugetlbfs_with_node(NULL, NULL);
2042
2043         /*
2044          * remove hstate attributes from any nodes that have them.
2045          */
2046         for (nid = 0; nid < nr_node_ids; nid++)
2047                 hugetlb_unregister_node(node_devices[nid]);
2048 }
2049
2050 /*
2051  * Register hstate attributes for a single node device.
2052  * No-op if attributes already registered.
2053  */
2054 static void hugetlb_register_node(struct node *node)
2055 {
2056         struct hstate *h;
2057         struct node_hstate *nhs = &node_hstates[node->dev.id];
2058         int err;
2059
2060         if (nhs->hugepages_kobj)
2061                 return;         /* already allocated */
2062
2063         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2064                                                         &node->dev.kobj);
2065         if (!nhs->hugepages_kobj)
2066                 return;
2067
2068         for_each_hstate(h) {
2069                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2070                                                 nhs->hstate_kobjs,
2071                                                 &per_node_hstate_attr_group);
2072                 if (err) {
2073                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2074                                 h->name, node->dev.id);
2075                         hugetlb_unregister_node(node);
2076                         break;
2077                 }
2078         }
2079 }
2080
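/*
 * A successful registration exposes the per-node subset alongside the
 * global attributes, e.g. for node 0 and a 2 MB hstate (illustrative):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 */
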
2081 /*
2082  * hugetlb init time:  register hstate attributes for all registered node
2083  * devices of nodes that have memory.  All on-line nodes should have
2084  * registered their associated device by this time.
2085  */
2086 static void __init hugetlb_register_all_nodes(void)
2087 {
2088         int nid;
2089
2090         for_each_node_state(nid, N_MEMORY) {
2091                 struct node *node = node_devices[nid];
2092                 if (node->dev.id == nid)
2093                         hugetlb_register_node(node);
2094         }
2095
2096         /*
2097          * Let the node device driver know we're here so it can
2098          * [un]register hstate attributes on node hotplug.
2099          */
2100         register_hugetlbfs_with_node(hugetlb_register_node,
2101                                      hugetlb_unregister_node);
2102 }
2103 #else   /* !CONFIG_NUMA */
2104
2105 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2106 {
2107         BUG();
2108         if (nidp)
2109                 *nidp = -1;
2110         return NULL;
2111 }
2112
2113 static void hugetlb_unregister_all_nodes(void) { }
2114
2115 static void hugetlb_register_all_nodes(void) { }
2116
2117 #endif
2118
2119 static void __exit hugetlb_exit(void)
2120 {
2121         struct hstate *h;
2122
2123         hugetlb_unregister_all_nodes();
2124
2125         for_each_hstate(h) {
2126                 kobject_put(hstate_kobjs[hstate_index(h)]);
2127         }
2128
2129         kobject_put(hugepages_kobj);
2130         kfree(htlb_fault_mutex_table);
2131 }
2132 module_exit(hugetlb_exit);
2133
2134 static int __init hugetlb_init(void)
2135 {
2136         int i;
2137
2138         if (!hugepages_supported())
2139                 return 0;
2140
2141         if (!size_to_hstate(default_hstate_size)) {
2142                 default_hstate_size = HPAGE_SIZE;
2143                 if (!size_to_hstate(default_hstate_size))
2144                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2145         }
2146         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2147         if (default_hstate_max_huge_pages)
2148                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2149
2150         hugetlb_init_hstates();
2151         gather_bootmem_prealloc();
2152         report_hugepages();
2153
2154         hugetlb_sysfs_init();
2155         hugetlb_register_all_nodes();
2156         hugetlb_cgroup_file_init();
2157
2158 #ifdef CONFIG_SMP
2159         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2160 #else
2161         num_fault_mutexes = 1;
2162 #endif
2163         htlb_fault_mutex_table =
2164                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2165         BUG_ON(!htlb_fault_mutex_table);
2166
2167         for (i = 0; i < num_fault_mutexes; i++)
2168                 mutex_init(&htlb_fault_mutex_table[i]);
2169         return 0;
2170 }
2171 module_init(hugetlb_init);
2172
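/*
 * Sizing note for the fault mutex table allocated in hugetlb_init(): the
 * number of mutexes scales with the CPU count to bound contention between
 * parallel faults, and is rounded up to a power of two so the per-fault
 * hash (computed later in this file from the faulting mapping, page index
 * and address) can be reduced with a cheap mask,
 * hash & (num_fault_mutexes - 1), instead of a modulo.
 */
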
2173 /* Should be called on processing a hugepagesz=... option */
2174 void __init hugetlb_add_hstate(unsigned order)
2175 {
2176         struct hstate *h;
2177         unsigned long i;
2178
2179         if (size_to_hstate(PAGE_SIZE << order)) {
2180                 pr_warning("hugepagesz= specified twice, ignoring\n");
2181                 return;
2182         }
2183         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2184         BUG_ON(order == 0);
2185         h = &hstates[hugetlb_max_hstate++];
2186         h->order = order;
2187         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2188         h->nr_huge_pages = 0;
2189         h->free_huge_pages = 0;
2190         for (i = 0; i < MAX_NUMNODES; ++i)
2191                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2192         INIT_LIST_HEAD(&h->hugepage_activelist);
2193         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2194         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2195         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2196                                         huge_page_size(h)/1024);
2197
2198         parsed_hstate = h;
2199 }
2200
2201 static int __init hugetlb_nrpages_setup(char *s)
2202 {
2203         unsigned long *mhp;
2204         static unsigned long *last_mhp;
2205
2206         /*
2207          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2208          * so this hugepages= parameter goes to the "default hstate".
2209          */
2210         if (!hugetlb_max_hstate)
2211                 mhp = &default_hstate_max_huge_pages;
2212         else
2213                 mhp = &parsed_hstate->max_huge_pages;
2214
2215         if (mhp == last_mhp) {
2216                 pr_warning("hugepages= specified twice without "
2217                            "interleaving hugepagesz=, ignoring\n");
2218                 return 1;
2219         }
2220
2221         if (sscanf(s, "%lu", mhp) <= 0)
2222                 *mhp = 0;
2223
2224         /*
2225          * Global state is always initialized later in hugetlb_init.
2226          * But pages for hstates of order >= MAX_ORDER must be allocated here,
2227          * early enough to still use the bootmem allocator.
2228          */
2229         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2230                 hugetlb_hstate_alloc_pages(parsed_hstate);
2231
2232         last_mhp = mhp;
2233
2234         return 1;
2235 }
2236 __setup("hugepages=", hugetlb_nrpages_setup);
2237
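/*
 * Example boot command line (illustrative, given CPU support for 1 GB
 * pages). Ordering matters, because each hugepages= applies to the hstate
 * named by the most recent hugepagesz=:
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * The 1G (gigantic) pool is carved out immediately via the MAX_ORDER check
 * above; the 2M pool is populated later from hugetlb_init().
 */
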
2238 static int __init hugetlb_default_setup(char *s)
2239 {
2240         default_hstate_size = memparse(s, &s);
2241         return 1;
2242 }
2243 __setup("default_hugepagesz=", hugetlb_default_setup);
2244
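/*
 * For instance, booting with default_hugepagesz=1G makes the 1G hstate the
 * default one, i.e. the hstate controlled by /proc/sys/vm/nr_hugepages and
 * used for hugetlbfs mounts that do not specify a page size.
 */
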
2245 static unsigned int cpuset_mems_nr(unsigned int *array)
2246 {
2247         int node;
2248         unsigned int nr = 0;
2249
2250         for_each_node_mask(node, cpuset_current_mems_allowed)
2251                 nr += array[node];
2252
2253         return nr;
2254 }
2255
2256 #ifdef CONFIG_SYSCTL
2257 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2258                          struct ctl_table *table, int write,
2259                          void __user *buffer, size_t *length, loff_t *ppos)
2260 {
2261         struct hstate *h = &default_hstate;
2262         unsigned long tmp = h->max_huge_pages;
2263         int ret;
2264
2265         if (!hugepages_supported())
2266                 return -ENOTSUPP;
2267
2268         table->data = &tmp;
2269         table->maxlen = sizeof(unsigned long);
2270         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2271         if (ret)
2272                 goto out;
2273
2274         if (write)
2275                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2276                                                   NUMA_NO_NODE, tmp, *length);
2277 out:
2278         return ret;
2279 }
2280
2281 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2282                           void __user *buffer, size_t *length, loff_t *ppos)
2283 {
2284
2285         return hugetlb_sysctl_handler_common(false, table, write,
2286                                                         buffer, length, ppos);
2287 }
2288
2289 #ifdef CONFIG_NUMA
2290 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2291                           void __user *buffer, size_t *length, loff_t *ppos)
2292 {
2293         return hugetlb_sysctl_handler_common(true, table, write,
2294                                                         buffer, length, ppos);
2295 }
2296 #endif /* CONFIG_NUMA */
2297
2298 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2299                         void __user *buffer,
2300                         size_t *length, loff_t *ppos)
2301 {
2302         struct hstate *h = &default_hstate;
2303         unsigned long tmp;
2304         int ret;
2305
2306         if (!hugepages_supported())
2307                 return -ENOTSUPP;
2308
2309         tmp = h->nr_overcommit_huge_pages;
2310
2311         if (write && hstate_is_gigantic(h))
2312                 return -EINVAL;
2313
2314         table->data = &tmp;
2315         table->maxlen = sizeof(unsigned long);
2316         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2317         if (ret)
2318                 goto out;
2319
2320         if (write) {
2321                 spin_lock(&hugetlb_lock);
2322                 h->nr_overcommit_huge_pages = tmp;
2323                 spin_unlock(&hugetlb_lock);
2324         }
2325 out:
2326         return ret;
2327 }
2328
2329 #endif /* CONFIG_SYSCTL */
2330
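/*
 * Illustrative sysctl usage for the handlers above:
 *
 *	sysctl vm.nr_hugepages=512
 *	sysctl vm.nr_overcommit_hugepages=64
 *
 * The latter allows up to 64 surplus pages to be allocated on demand from
 * the buddy allocator once the persistent pool is exhausted.
 */
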
2331 void hugetlb_report_meminfo(struct seq_file *m)
2332 {
2333         struct hstate *h = &default_hstate;
2334         if (!hugepages_supported())
2335                 return;
2336         seq_printf(m,
2337                         "HugePages_Total:   %5lu\n"
2338                         "HugePages_Free:    %5lu\n"
2339                         "HugePages_Rsvd:    %5lu\n"
2340                         "HugePages_Surp:    %5lu\n"
2341                         "Hugepagesize:   %8lu kB\n",
2342                         h->nr_huge_pages,
2343                         h->free_huge_pages,
2344                         h->resv_huge_pages,
2345                         h->surplus_huge_pages,
2346                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2347 }
2348
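/*
 * Sample /proc/meminfo output produced above (values illustrative):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      512
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */
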
2349 int hugetlb_report_node_meminfo(int nid, char *buf)
2350 {
2351         struct hstate *h = &default_hstate;
2352         if (!hugepages_supported())
2353                 return 0;
2354         return sprintf(buf,
2355                 "Node %d HugePages_Total: %5u\n"
2356                 "Node %d HugePages_Free:  %5u\n"
2357                 "Node %d HugePages_Surp:  %5u\n",
2358                 nid, h->nr_huge_pages_node[nid],
2359                 nid, h->free_huge_pages_node[nid],
2360                 nid, h->surplus_huge_pages_node[nid]);
2361 }
2362
2363 void hugetlb_show_meminfo(void)
2364 {
2365         struct hstate *h;
2366         int nid;
2367
2368         if (!hugepages_supported())
2369                 return;
2370
2371         for_each_node_state(nid, N_MEMORY)
2372                 for_each_hstate(h)
2373                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2374                                 nid,
2375                                 h->nr_huge_pages_node[nid],
2376                                 h->free_huge_pages_node[nid],
2377                                 h->surplus_huge_pages_node[nid],
2378                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2379 }
2380
2381 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2382 unsigned long hugetlb_total_pages(void)
2383 {
2384         struct hstate *h;
2385         unsigned long nr_total_pages = 0;
2386
2387         for_each_hstate(h)
2388                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2389         return nr_total_pages;
2390 }
2391
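/*
 * Worked example (illustrative, 4 KB PAGE_SIZE): with 512 x 2 MB pages and
 * 4 x 1 GB pages, this returns 512*512 + 4*262144 = 1310720 base pages,
 * i.e. 5 GB of memory held in huge pages.
 */
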
2392 static int hugetlb_acct_memory(struct hstate *h, long delta)
2393 {
2394         int ret = -ENOMEM;
2395
2396         spin_lock(&hugetlb_lock);
2397         /*
2398          * When cpuset is configured, it breaks the strict hugetlb page
2399          * reservation, as the accounting is done on a global variable. Such
2400          * a reservation is completely rubbish in the presence of cpuset,
2401          * because the reservation is not checked against page availability
2402          * for the current cpuset. The application can still be OOM'ed by the
2403          * kernel for lack of free htlb pages in the cpuset the task is in.
2404          * Attempting to enforce strict accounting with cpuset is almost
2405          * impossible (or too ugly) because cpusets are too fluid: tasks and
2406          * memory nodes can be dynamically moved between cpusets.
2407          *
2408          * The change of semantics for shared hugetlb mappings with cpuset is
2409          * undesirable. However, in order to preserve some of the semantics,
2410          * we fall back to checking against current free page availability as
2411          * a best attempt, hopefully minimizing the impact of the changing
2412          * semantics that cpuset brings.
2413          */
2414         if (delta > 0) {
2415                 if (gather_surplus_pages(h, delta) < 0)
2416                         goto out;
2417
2418                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2419                         return_unused_surplus_pages(h, delta);
2420                         goto out;
2421                 }
2422         }
2423
2424         ret = 0;
2425         if (delta < 0)
2426                 return_unused_surplus_pages(h, (unsigned long) -delta);
2427
2428 out:
2429         spin_unlock(&hugetlb_lock);
2430         return ret;
2431 }
2432
2433 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2434 {
2435         struct resv_map *resv = vma_resv_map(vma);
2436
2437         /*
2438          * This new VMA should share its sibling's reservation map if present.
2439          * The VMA will only ever have a valid reservation map pointer where
2440          * it is being copied for another still existing VMA.  As that VMA
2441          * has a reference to the reservation map it cannot disappear until
2442          * after this open call completes.  It is therefore safe to take a
2443          * new reference here without additional locking.
2444          */
2445         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2446                 kref_get(&resv->refs);
2447 }
2448
2449 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2450 {
2451         struct hstate *h = hstate_vma(vma);
2452         struct resv_map *resv = vma_resv_map(vma);
2453         struct hugepage_subpool *spool = subpool_vma(vma);
2454         unsigned long reserve, start, end;
2455
2456         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2457                 return;
2458
2459         start = vma_hugecache_offset(h, vma, vma->vm_start);
2460         end = vma_hugecache_offset(h, vma, vma->vm_end);
2461
2462         reserve = (end - start) - region_count(resv, start, end);
2463
2464         kref_put(&resv->refs, resv_map_release);
2465
2466         if (reserve) {
2467                 hugetlb_acct_memory(h, -reserve);
2468                 hugepage_subpool_put_pages(spool, reserve);
2469         }
2470 }
2471
2472 /*
2473  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2474  * handle_mm_fault() to try to instantiate regular-sized pages in the
2475  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2476  * this far.
2477  */
2478 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2479 {
2480         BUG();
2481         return 0;
2482 }
2483
2484 const struct vm_operations_struct hugetlb_vm_ops = {
2485         .fault = hugetlb_vm_op_fault,
2486         .open = hugetlb_vm_op_open,
2487         .close = hugetlb_vm_op_close,
2488 };
2489
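/*
 * Note: .fault is wired to the BUG trap above because hugetlb faults never
 * arrive through the generic ->fault path; handle_mm_fault() recognizes
 * hugetlb VMAs early and hands them to hugetlb_fault() directly.
 */
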
2490 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2491                                 int writable)
2492 {
2493         pte_t entry;
2494
2495         if (writable) {
2496                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2497                                          vma->vm_page_prot)));
2498         } else {
2499                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2500                                            vma->vm_page_prot));
2501         }
2502         entry = pte_mkyoung(entry);
2503         entry = pte_mkhuge(entry);
2504         entry = arch_make_huge_pte(entry, vma, page, writable);
2505
2506         return entry;
2507 }
2508
2509 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2510                                    unsigned long address, pte_t *ptep)
2511 {
2512         pte_t entry;
2513
2514         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2515         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2516                 update_mmu_cache(vma, address, ptep);
2517 }
2518
2519 static int is_hugetlb_entry_migration(pte_t pte)
2520 {
2521         swp_entry_t swp;
2522
2523         if (huge_pte_none(pte) || pte_present(pte))
2524                 return 0;
2525         swp = pte_to_swp_entry(pte);
2526         if (non_swap_entry(swp) && is_migration_entry(swp))
2527                 return 1;
2528         else
2529                 return 0;
2530 }
2531
2532 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2533 {
2534         swp_entry_t swp;
2535
2536         if (huge_pte_none(pte) || pte_present(pte))
2537                 return 0;
2538         swp = pte_to_swp_entry(pte);
2539         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2540                 return 1;
2541         else
2542                 return 0;
2543 }
2544
2545 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2546                             struct vm_area_struct *vma)
2547 {
2548         pte_t *src_pte, *dst_pte, entry;
2549         struct page *ptepage;
2550         unsigned long addr;
2551         int cow;
2552         struct hstate *h = hstate_vma(vma);
2553         unsigned long sz = huge_page_size(h);
2554         unsigned long mmun_start;       /* For mmu_notifiers */
2555         unsigned long mmun_end;         /* For mmu_notifiers */
2556         int ret = 0;
2557
2558         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2559
2560         mmun_start = vma->vm_start;
2561         mmun_end = vma->vm_end;
2562         if (cow)
2563                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2564
2565         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2566                 spinlock_t *src_ptl, *dst_ptl;
2567                 src_pte = huge_pte_offset(src, addr);
2568                 if (!src_pte)
2569                         continue;
2570                 dst_pte = huge_pte_alloc(dst, addr, sz);
2571                 if (!dst_pte) {
2572                         ret = -ENOMEM;
2573                         break;
2574                 }
2575
2576                 /* If the pagetables are shared don't copy or take references */
2577                 if (dst_pte == src_pte)
2578                         continue;
2579
2580                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2581                 src_ptl = huge_pte_lockptr(h, src, src_pte);
2582                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2583                 entry = huge_ptep_get(src_pte);
2584                 if (huge_pte_none(entry)) { /* skip none entry */
2585                         ;
2586                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2587                                     is_hugetlb_entry_hwpoisoned(entry))) {
2588                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2589
2590                         if (is_write_migration_entry(swp_entry) && cow) {
2591                                 /*
2592                                  * COW mappings require pages in both
2593                                  * parent and child to be set to read.
2594                                  */
2595                                 make_migration_entry_read(&swp_entry);
2596                                 entry = swp_entry_to_pte(swp_entry);
2597                                 set_huge_pte_at(src, addr, src_pte, entry);
2598                         }
2599                         set_huge_pte_at(dst, addr, dst_pte, entry);
2600                 } else {
2601                         if (cow)
2602                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2603                         entry = huge_ptep_get(src_pte);
2604                         ptepage = pte_page(entry);
2605                         get_page(ptepage);
2606                         page_dup_rmap(ptepage);
2607                         set_huge_pte_at(dst, addr, dst_pte, entry);
2608                 }
2609                 spin_unlock(src_ptl);
2610                 spin_unlock(dst_ptl);
2611         }
2612
2613         if (cow)
2614                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2615
2616         return ret;
2617 }
2618
2619 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2620                             unsigned long start, unsigned long end,
2621                             struct page *ref_page)
2622 {
2623         int force_flush = 0;
2624         struct mm_struct *mm = vma->vm_mm;
2625         unsigned long address;
2626         pte_t *ptep;
2627         pte_t pte;
2628         spinlock_t *ptl;
2629         struct page *page;
2630         struct hstate *h = hstate_vma(vma);
2631         unsigned long sz = huge_page_size(h);
2632         const unsigned long mmun_start = start; /* For mmu_notifiers */
2633         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2634
2635         WARN_ON(!is_vm_hugetlb_page(vma));
2636         BUG_ON(start & ~huge_page_mask(h));
2637         BUG_ON(end & ~huge_page_mask(h));
2638
2639         tlb_start_vma(tlb, vma);
2640         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2641         address = start;
2642 again:
2643         for (; address < end; address += sz) {
2644                 ptep = huge_pte_offset(mm, address);
2645                 if (!ptep)
2646                         continue;
2647
2648                 ptl = huge_pte_lock(h, mm, ptep);
2649                 if (huge_pmd_unshare(mm, &address, ptep))
2650                         goto unlock;
2651
2652                 pte = huge_ptep_get(ptep);
2653                 if (huge_pte_none(pte))
2654                         goto unlock;
2655
2656                 /*
2657                  * A HWPoisoned hugepage is already unmapped and its reference dropped
2658                  */
2659                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2660                         huge_pte_clear(mm, address, ptep);
2661                         goto unlock;
2662                 }
2663
2664                 page = pte_page(pte);
2665                 /*
2666                  * If a reference page is supplied, it is because a specific
2667                  * page is being unmapped, not a range. Ensure the page we
2668                  * are about to unmap is the actual page of interest.
2669                  */
2670                 if (ref_page) {
2671                         if (page != ref_page)
2672                                 goto unlock;
2673
2674                         /*
2675                          * Mark the VMA as having unmapped its page so that
2676                          * future faults in this VMA will fail rather than
2677                          * looking like data was lost.
2678                          */
2679                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2680                 }
2681
2682                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2683                 tlb_remove_tlb_entry(tlb, ptep, address);
2684                 if (huge_pte_dirty(pte))
2685                         set_page_dirty(page);
2686
2687                 page_remove_rmap(page);
2688                 force_flush = !__tlb_remove_page(tlb, page);
2689                 if (force_flush) {
2690                         address += sz;
2691                         spin_unlock(ptl);
2692                         break;
2693                 }
2694                 /* Bail out after unmapping reference page if supplied */
2695                 if (ref_page) {
2696                         spin_unlock(ptl);
2697                         break;
2698                 }
2699 unlock:
2700                 spin_unlock(ptl);
2701         }
2702         /*
2703          * mmu_gather ran out of room to batch pages; we break out of
2704          * the PTE lock to avoid doing the potentially expensive TLB invalidate
2705          * and page-free while holding it.
2706          */
2707         if (force_flush) {
2708                 force_flush = 0;
2709                 tlb_flush_mmu(tlb);
2710                 if (address < end && !ref_page)
2711                         goto again;
2712         }
2713         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2714         tlb_end_vma(tlb, vma);
2715 }
2716
2717 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2718                           struct vm_area_struct *vma, unsigned long start,
2719                           unsigned long end, struct page *ref_page)
2720 {
2721         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2722
2723         /*
2724          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2725          * test will fail on a vma being torn down, and not grab a page table
2726          * on its way out.  We're lucky that the flag has such an appropriate
2727          * name, and can in fact be safely cleared here. We could clear it
2728          * before the __unmap_hugepage_range above, but all that's necessary
2729          * is to clear it before releasing the i_mmap_rwsem. This works
2730          * because in the context this is called, the VMA is about to be
2731          * destroyed and the i_mmap_rwsem is held.
2732          */
2733         vma->vm_flags &= ~VM_MAYSHARE;
2734 }
2735
2736 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2737                           unsigned long end, struct page *ref_page)
2738 {
2739         struct mm_struct *mm;
2740         struct mmu_gather tlb;
2741
2742         mm = vma->vm_mm;
2743
2744         tlb_gather_mmu(&tlb, mm, start, end);
2745         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2746         tlb_finish_mmu(&tlb, start, end);
2747 }
2748
2749 /*
2750  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2751  * mapping it owns the reserve page for. The intention is to unmap the page
2752  * from other VMAs and let the children be SIGKILLed if they are faulting the
2753  * same region.
2754  */
2755 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2756                               struct page *page, unsigned long address)
2757 {
2758         struct hstate *h = hstate_vma(vma);
2759         struct vm_area_struct *iter_vma;
2760         struct address_space *mapping;
2761         pgoff_t pgoff;
2762
2763         /*
2764          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2765  * from the page cache lookup, which is in HPAGE_SIZE units.
2766          */
2767         address = address & huge_page_mask(h);
2768         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2769                         vma->vm_pgoff;
2770         mapping = file_inode(vma->vm_file)->i_mapping;
2771
2772         /*
2773          * Take the mapping lock for the duration of the table walk. As
2774          * this mapping should be shared between all the VMAs,
2775          * __unmap_hugepage_range() is called with the lock already held.
2776          */
2777         i_mmap_lock_write(mapping);
2778         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2779                 /* Do not unmap the current VMA */
2780                 if (iter_vma == vma)
2781                         continue;
2782
2783                 /*
2784                  * Unmap the page from other VMAs without their own reserves.
2785                  * They get marked to be SIGKILLed if they fault in these
2786                  * areas. This is because a future no-page fault on this VMA
2787                  * could insert a zeroed page instead of the data existing
2788                  * from the time of fork. This would look like data corruption.
2789                  */
2790                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2791                         unmap_hugepage_range(iter_vma, address,
2792                                              address + huge_page_size(h), page);
2793         }
2794         i_mmap_unlock_write(mapping);
2795 }
2796
2797 /*
2798  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2799  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2800  * cannot race with other handlers or page migration.
2801  * Keep the pte_same checks anyway to make transition from the mutex easier.
2802  */
2803 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2804                         unsigned long address, pte_t *ptep, pte_t pte,
2805                         struct page *pagecache_page, spinlock_t *ptl)
2806 {
2807         struct hstate *h = hstate_vma(vma);
2808         struct page *old_page, *new_page;
2809         int ret = 0, outside_reserve = 0;
2810         unsigned long mmun_start;       /* For mmu_notifiers */
2811         unsigned long mmun_end;         /* For mmu_notifiers */
2812
2813         old_page = pte_page(pte);
2814
2815 retry_avoidcopy:
2816         /* If no-one else is actually using this page, avoid the copy
2817          * and just make the page writable */
2818         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2819                 page_move_anon_rmap(old_page, vma, address);
2820                 set_huge_ptep_writable(vma, address, ptep);
2821                 return 0;
2822         }
2823
2824         /*
2825          * If the process that created a MAP_PRIVATE mapping is about to
2826          * perform a COW due to a shared page count, attempt to satisfy
2827          * the allocation without using the existing reserves. The pagecache
2828          * page is used to determine if the reserve at this address was
2829          * consumed or not. If reserves were used, a partial faulted mapping
2830          * at the time of fork() could consume its reserves on COW instead
2831          * of the full address range.
2832          */
2833         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2834                         old_page != pagecache_page)
2835                 outside_reserve = 1;
2836
2837         page_cache_get(old_page);
2838
2839         /*
2840          * Drop page table lock as buddy allocator may be called. It will
2841          * be acquired again before returning to the caller, as expected.
2842          */
2843         spin_unlock(ptl);
2844         new_page = alloc_huge_page(vma, address, outside_reserve);
2845
2846         if (IS_ERR(new_page)) {
2847                 /*
2848                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2849                  * it is due to references held by a child and an insufficient
2850                  * huge page pool. To guarantee the original mapper's
2851                  * reliability, unmap the page from child processes. The child
2852                  * may get SIGKILLed if it later faults.
2853                  */
2854                 if (outside_reserve) {
2855                         page_cache_release(old_page);
2856                         BUG_ON(huge_pte_none(pte));
2857                         unmap_ref_private(mm, vma, old_page, address);
2858                         BUG_ON(huge_pte_none(pte));
2859                         spin_lock(ptl);
2860                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2861                         if (likely(ptep &&
2862                                    pte_same(huge_ptep_get(ptep), pte)))
2863                                 goto retry_avoidcopy;
2864                         /*
2865                          * A race occurred while re-acquiring the page table
2866                          * lock, and our job is done.
2867                          */
2868                         return 0;
2869                 }
2870
2871                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
2872                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
2873                 goto out_release_old;
2874         }
2875
2876         /*
2877          * When the original hugepage is a shared one, it does not have
2878          * an anon_vma prepared.
2879          */
2880         if (unlikely(anon_vma_prepare(vma))) {
2881                 ret = VM_FAULT_OOM;
2882                 goto out_release_all;
2883         }
2884
2885         copy_user_huge_page(new_page, old_page, address, vma,
2886                             pages_per_huge_page(h));
2887         __SetPageUptodate(new_page);
2888
2889         mmun_start = address & huge_page_mask(h);
2890         mmun_end = mmun_start + huge_page_size(h);
2891         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2892
2893         /*
2894          * Retake the page table lock to check for racing updates
2895          * before the page tables are altered
2896          */
2897         spin_lock(ptl);
2898         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2899         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
2900                 ClearPagePrivate(new_page);
2901
2902                 /* Break COW */
2903                 huge_ptep_clear_flush(vma, address, ptep);
2904                 set_huge_pte_at(mm, address, ptep,
2905                                 make_huge_pte(vma, new_page, 1));
2906                 page_remove_rmap(old_page);
2907                 hugepage_add_new_anon_rmap(new_page, vma, address);
2908                 /* Make the old page be freed below */
2909                 new_page = old_page;
2910         }
2911         spin_unlock(ptl);
2912         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2913 out_release_all:
2914         page_cache_release(new_page);
2915 out_release_old:
2916         page_cache_release(old_page);
2917
2918         spin_lock(ptl); /* Caller expects lock to be held */
2919         return ret;
2920 }
2921
2922 /* Return the pagecache page at a given address within a VMA */
2923 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2924                         struct vm_area_struct *vma, unsigned long address)
2925 {
2926         struct address_space *mapping;
2927         pgoff_t idx;
2928
2929         mapping = vma->vm_file->f_mapping;
2930         idx = vma_hugecache_offset(h, vma, address);
2931
2932         return find_lock_page(mapping, idx);
2933 }
2934
2935 /*
2936  * Return whether there is a pagecache page to back the given address within the VMA.
2937  * The caller, follow_hugetlb_page(), holds page_table_lock, so we cannot lock_page().
2938  */
2939 static bool hugetlbfs_pagecache_present(struct hstate *h,
2940                         struct vm_area_struct *vma, unsigned long address)
2941 {
2942         struct address_space *mapping;
2943         pgoff_t idx;
2944         struct page *page;
2945
2946         mapping = vma->vm_file->f_mapping;
2947         idx = vma_hugecache_offset(h, vma, address);
2948
2949         page = find_get_page(mapping, idx);
2950         if (page)
2951                 put_page(page);
2952         return page != NULL;
2953 }
2954
2955 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2956                            struct address_space *mapping, pgoff_t idx,
2957                            unsigned long address, pte_t *ptep, unsigned int flags)
2958 {
2959         struct hstate *h = hstate_vma(vma);
2960         int ret = VM_FAULT_SIGBUS;
2961         int anon_rmap = 0;
2962         unsigned long size;
2963         struct page *page;
2964         pte_t new_pte;
2965         spinlock_t *ptl;
2966
2967         /*
2968          * Currently, we are forced to kill the process if the original
2969          * mapper has unmapped pages from the child due to a failed COW.
2970          * Warn that such a situation has occurred, as it may not be obvious.
2971          */
2972         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2973                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
2974                            current->pid);
2975                 return ret;
2976         }
2977
2978         /*
2979          * Use page lock to guard against racing truncation
2980          * before we get page_table_lock.
2981          */
2982 retry:
2983         page = find_lock_page(mapping, idx);
2984         if (!page) {
2985                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2986                 if (idx >= size)
2987                         goto out;
2988                 page = alloc_huge_page(vma, address, 0);
2989                 if (IS_ERR(page)) {
2990                         ret = PTR_ERR(page);
2991                         if (ret == -ENOMEM)
2992                                 ret = VM_FAULT_OOM;
2993                         else
2994                                 ret = VM_FAULT_SIGBUS;
2995                         goto out;
2996                 }
2997                 clear_huge_page(page, address, pages_per_huge_page(h));
2998                 __SetPageUptodate(page);
2999
3000                 if (vma->vm_flags & VM_MAYSHARE) {
3001                         int err;
3002                         struct inode *inode = mapping->host;
3003
3004                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3005                         if (err) {
3006                                 put_page(page);
3007                                 if (err == -EEXIST)
3008                                         goto retry;
3009                                 goto out;
3010                         }
3011                         ClearPagePrivate(page);
3012
3013                         spin_lock(&inode->i_lock);
3014                         inode->i_blocks += blocks_per_huge_page(h);
3015                         spin_unlock(&inode->i_lock);
3016                 } else {
3017                         lock_page(page);
3018                         if (unlikely(anon_vma_prepare(vma))) {
3019                                 ret = VM_FAULT_OOM;
3020                                 goto backout_unlocked;
3021                         }
3022                         anon_rmap = 1;
3023                 }
3024         } else {
3025                 /*
3026                  * If a memory error occurs between mmap() and fault, some processes
3027                  * don't have a hwpoisoned swap entry for the errored virtual address,
3028                  * so we must block the hugepage fault with a PG_hwpoison bit check.
3029                  */
3030                 if (unlikely(PageHWPoison(page))) {
3031                         ret = VM_FAULT_HWPOISON |
3032                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3033                         goto backout_unlocked;
3034                 }
3035         }
3036
3037         /*
3038          * If we are going to COW a private mapping later, we examine the
3039          * pending reservations for this page now. This will ensure that
3040          * any allocations necessary to record that reservation occur outside
3041          * the spinlock.
3042          */
3043         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3044                 if (vma_needs_reservation(h, vma, address) < 0) {
3045                         ret = VM_FAULT_OOM;
3046                         goto backout_unlocked;
3047                 }
3048
3049         ptl = huge_pte_lockptr(h, mm, ptep);
3050         spin_lock(ptl);
3051         size = i_size_read(mapping->host) >> huge_page_shift(h);
3052         if (idx >= size)
3053                 goto backout;
3054
3055         ret = 0;
3056         if (!huge_pte_none(huge_ptep_get(ptep)))
3057                 goto backout;
3058
3059         if (anon_rmap) {
3060                 ClearPagePrivate(page);
3061                 hugepage_add_new_anon_rmap(page, vma, address);
3062         } else
3063                 page_dup_rmap(page);
3064         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3065                                 && (vma->vm_flags & VM_SHARED)));
3066         set_huge_pte_at(mm, address, ptep, new_pte);
3067
3068         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3069                 /* Optimization, do the COW without a second fault */
3070                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3071         }
3072
3073         spin_unlock(ptl);
3074         unlock_page(page);
3075 out:
3076         return ret;
3077
3078 backout:
3079         spin_unlock(ptl);
3080 backout_unlocked:
3081         unlock_page(page);
3082         put_page(page);
3083         goto out;
3084 }
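/*
 * Illustrative timeline of the race that the "retry" label above resolves
 * (hypothetical scenario, not code in this file): two threads fault on the
 * same index of a shared mapping, both miss in find_lock_page() and
 * allocate a hugepage, but only one add_to_page_cache() succeeds; the
 * loser sees -EEXIST, drops its page and retries, finding the winner's
 * page:
 *
 *	CPU0				CPU1
 *	find_lock_page() -> NULL	find_lock_page() -> NULL
 *	alloc_huge_page()		alloc_huge_page()
 *	add_to_page_cache() -> 0	add_to_page_cache() -> -EEXIST
 *					put_page(); goto retry;
 *					find_lock_page() -> CPU0's page
 */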
3085
3086 #ifdef CONFIG_SMP
3087 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3088                             struct vm_area_struct *vma,
3089                             struct address_space *mapping,
3090                             pgoff_t idx, unsigned long address)
3091 {
3092         unsigned long key[2];
3093         u32 hash;
3094
3095         if (vma->vm_flags & VM_SHARED) {
3096                 key[0] = (unsigned long) mapping;
3097                 key[1] = idx;
3098         } else {
3099                 key[0] = (unsigned long) mm;
3100                 key[1] = address >> huge_page_shift(h);
3101         }
3102
3103         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3104
3105         return hash & (num_fault_mutexes - 1);
3106 }
3107 #else
3108 /*
3109  * For uniprocessor systems we always use a single mutex, so just
3110  * return 0 and avoid the hashing overhead.
3111  */
3112 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3113                             struct vm_area_struct *vma,
3114                             struct address_space *mapping,
3115                             pgoff_t idx, unsigned long address)
3116 {
3117         return 0;
3118 }
3119 #endif
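/*
 * Setup sketch assumed by the hash above (illustrative, not part of this
 * file): the "hash & (num_fault_mutexes - 1)" mask only works if
 * num_fault_mutexes is a power of two, so the table initialization is
 * expected to look roughly like:
 *
 *	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
 *	htlb_fault_mutex_table =
 *		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
 *	for (i = 0; i < num_fault_mutexes; i++)
 *		mutex_init(&htlb_fault_mutex_table[i]);
 */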
3120
3121 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3122                         unsigned long address, unsigned int flags)
3123 {
3124         pte_t *ptep, entry;
3125         spinlock_t *ptl;
3126         int ret;
3127         u32 hash;
3128         pgoff_t idx;
3129         struct page *page = NULL;
3130         struct page *pagecache_page = NULL;
3131         struct hstate *h = hstate_vma(vma);
3132         struct address_space *mapping;
3133
3134         address &= huge_page_mask(h);
3135
3136         ptep = huge_pte_offset(mm, address);
3137         if (ptep) {
3138                 entry = huge_ptep_get(ptep);
3139                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3140                         migration_entry_wait_huge(vma, mm, ptep);
3141                         return 0;
3142                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3143                         return VM_FAULT_HWPOISON_LARGE |
3144                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3145         }
3146
3147         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3148         if (!ptep)
3149                 return VM_FAULT_OOM;
3150
3151         mapping = vma->vm_file->f_mapping;
3152         idx = vma_hugecache_offset(h, vma, address);
3153
3154         /*
3155          * Serialize hugepage allocation and instantiation, so that we don't
3156          * get spurious allocation failures if two CPUs race to instantiate
3157          * the same page in the page cache.
3158          */
3159         hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3160         mutex_lock(&htlb_fault_mutex_table[hash]);
3161
3162         entry = huge_ptep_get(ptep);
3163         if (huge_pte_none(entry)) {
3164                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3165                 goto out_mutex;
3166         }
3167
3168         ret = 0;
3169
3170         /*
3171          * If we are going to COW the mapping later, we examine the pending
3172          * reservations for this page now. This will ensure that any
3173          * allocations necessary to record that reservation occur outside the
3174          * spinlock. For private mappings, we also look up the pagecache
3175          * page now, as it is used to determine if a reservation has been
3176          * consumed.
3177          */
3178         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3179                 if (vma_needs_reservation(h, vma, address) < 0) {
3180                         ret = VM_FAULT_OOM;
3181                         goto out_mutex;
3182                 }
3183
3184                 if (!(vma->vm_flags & VM_MAYSHARE))
3185                         pagecache_page = hugetlbfs_pagecache_page(h,
3186                                                                 vma, address);
3187         }
3188
3189         /*
3190          * hugetlb_cow() requires page locks of pte_page(entry) and
3191          * pagecache_page, so here we need to take the former
3192          * when page != pagecache_page or !pagecache_page.
3193          * Note that the locking order is always pagecache_page -> page,
3194          * so there is no deadlock worry.
3195          */
3196         page = pte_page(entry);
3197         get_page(page);
3198         if (page != pagecache_page)
3199                 lock_page(page);
3200
3201         ptl = huge_pte_lockptr(h, mm, ptep);
3202         spin_lock(ptl);
3203         /* Check for a racing update before calling hugetlb_cow */
3204         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3205                 goto out_ptl;
3206
3207
3208         if (flags & FAULT_FLAG_WRITE) {
3209                 if (!huge_pte_write(entry)) {
3210                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3211                                         pagecache_page, ptl);
3212                         goto out_ptl;
3213                 }
3214                 entry = huge_pte_mkdirty(entry);
3215         }
3216         entry = pte_mkyoung(entry);
3217         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3218                                                 flags & FAULT_FLAG_WRITE))
3219                 update_mmu_cache(vma, address, ptep);
3220
3221 out_ptl:
3222         spin_unlock(ptl);
3223
3224         if (pagecache_page) {
3225                 unlock_page(pagecache_page);
3226                 put_page(pagecache_page);
3227         }
3228         if (page != pagecache_page)
3229                 unlock_page(page);
3230         put_page(page);
3231
3232 out_mutex:
3233         mutex_unlock(&htlb_fault_mutex_table[hash]);
3234         return ret;
3235 }
3236
3237 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3238                          struct page **pages, struct vm_area_struct **vmas,
3239                          unsigned long *position, unsigned long *nr_pages,
3240                          long i, unsigned int flags)
3241 {
3242         unsigned long pfn_offset;
3243         unsigned long vaddr = *position;
3244         unsigned long remainder = *nr_pages;
3245         struct hstate *h = hstate_vma(vma);
3246
3247         while (vaddr < vma->vm_end && remainder) {
3248                 pte_t *pte;
3249                 spinlock_t *ptl = NULL;
3250                 int absent;
3251                 struct page *page;
3252
3253                 /*
3254                  * Some archs (sparc64, sh*) have multiple pte_ts per
3255                  * hugepage.  We have to make sure we get the first,
3256                  * for the page indexing below to work.
3257                  *
3258                  * Note that the page table lock is not held when pte is NULL.
3259                  */
3260                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3261                 if (pte)
3262                         ptl = huge_pte_lock(h, mm, pte);
3263                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3264
3265                 /*
3266                  * When coredumping, it suits get_dump_page if we just return
3267                  * an error where there's an empty slot with no huge pagecache
3268                  * to back it.  This way, we avoid allocating a hugepage, and
3269                  * the sparse dumpfile avoids allocating disk blocks, but its
3270                  * huge holes still show up with zeroes where they need to be.
3271                  */
3272                 if (absent && (flags & FOLL_DUMP) &&
3273                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3274                         if (pte)
3275                                 spin_unlock(ptl);
3276                         remainder = 0;
3277                         break;
3278                 }
3279
3280                 /*
3281                  * We need to call hugetlb_fault for both hugepages under
3282                  * migration (in which case hugetlb_fault waits for the
3283                  * migration) and hwpoisoned hugepages (in which case we need
3284                  * to prevent the caller from accessing them). To do this, we
3285                  * use is_swap_pte here instead of is_hugetlb_entry_migration
3286                  * and is_hugetlb_entry_hwpoisoned, because it simply covers
3287                  * both cases and because we can't follow correct pages
3288                  * directly from any kind of swap entry.
3289                  */
3290                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3291                     ((flags & FOLL_WRITE) &&
3292                       !huge_pte_write(huge_ptep_get(pte)))) {
3293                         int ret;
3294
3295                         if (pte)
3296                                 spin_unlock(ptl);
3297                         ret = hugetlb_fault(mm, vma, vaddr,
3298                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3299                         if (!(ret & VM_FAULT_ERROR))
3300                                 continue;
3301
3302                         remainder = 0;
3303                         break;
3304                 }
3305
3306                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3307                 page = pte_page(huge_ptep_get(pte));
3308 same_page:
3309                 if (pages) {
3310                         pages[i] = mem_map_offset(page, pfn_offset);
3311                         get_page_foll(pages[i]);
3312                 }
3313
3314                 if (vmas)
3315                         vmas[i] = vma;
3316
3317                 vaddr += PAGE_SIZE;
3318                 ++pfn_offset;
3319                 --remainder;
3320                 ++i;
3321                 if (vaddr < vma->vm_end && remainder &&
3322                                 pfn_offset < pages_per_huge_page(h)) {
3323                         /*
3324                          * We use pfn_offset to avoid touching the pageframes
3325                          * of this compound page.
3326                          */
3327                         goto same_page;
3328                 }
3329                 spin_unlock(ptl);
3330         }
3331         *nr_pages = remainder;
3332         *position = vaddr;
3333
3334         return i ? i : -EFAULT;
3335 }
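/*
 * Caller sketch (hypothetical, condensed from the generic
 * get_user_pages() path): *position and *nr_pages are advanced past
 * whatever was processed, and the return value is the cumulative count
 * "i", or -EFAULT if no page could be processed at all:
 *
 *	unsigned long addr = start, nr = nr_wanted;
 *	long got;
 *
 *	got = follow_hugetlb_page(mm, vma, pages, NULL, &addr, &nr,
 *				  0, FOLL_GET);
 *	if (got > 0)
 *		pr_debug("got %ld pages, %lu left at %#lx\n", got, nr, addr);
 */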
3336
3337 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3338                 unsigned long address, unsigned long end, pgprot_t newprot)
3339 {
3340         struct mm_struct *mm = vma->vm_mm;
3341         unsigned long start = address;
3342         pte_t *ptep;
3343         pte_t pte;
3344         struct hstate *h = hstate_vma(vma);
3345         unsigned long pages = 0;
3346
3347         BUG_ON(address >= end);
3348         flush_cache_range(vma, address, end);
3349
3350         mmu_notifier_invalidate_range_start(mm, start, end);
3351         i_mmap_lock_write(vma->vm_file->f_mapping);
3352         for (; address < end; address += huge_page_size(h)) {
3353                 spinlock_t *ptl;
3354                 ptep = huge_pte_offset(mm, address);
3355                 if (!ptep)
3356                         continue;
3357                 ptl = huge_pte_lock(h, mm, ptep);
3358                 if (huge_pmd_unshare(mm, &address, ptep)) {
3359                         pages++;
3360                         spin_unlock(ptl);
3361                         continue;
3362                 }
3363                 if (!huge_pte_none(huge_ptep_get(ptep))) {
3364                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3365                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3366                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3367                         set_huge_pte_at(mm, address, ptep, pte);
3368                         pages++;
3369                 }
3370                 spin_unlock(ptl);
3371         }
3372         /*
3373          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3374          * may have cleared our pud entry and done put_page on the page table:
3375          * once we release i_mmap_rwsem, another task can do the final put_page
3376          * and that page table be reused and filled with junk.
3377          */
3378         flush_tlb_range(vma, start, end);
3379         i_mmap_unlock_write(vma->vm_file->f_mapping);
3380         mmu_notifier_invalidate_range_end(mm, start, end);
3381
3382         return pages << h->order;
3383 }
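/*
 * Worked example of the return value above (illustrative): for 2MB
 * hugepages, h->order is 9 (2MB / 4KB == 512 == 1 << 9), so changing
 * the protection of 3 hugepages reports
 *
 *	3 << 9 == 1536
 *
 * base pages to the caller, matching what mprotect() accounting
 * expects for the same range of small pages.
 */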
3384
3385 int hugetlb_reserve_pages(struct inode *inode,
3386                                         long from, long to,
3387                                         struct vm_area_struct *vma,
3388                                         vm_flags_t vm_flags)
3389 {
3390         long ret, chg;
3391         struct hstate *h = hstate_inode(inode);
3392         struct hugepage_subpool *spool = subpool_inode(inode);
3393         struct resv_map *resv_map;
3394
3395         /*
3396          * Only apply hugepage reservation if asked. At fault time, an
3397          * attempt will be made for VM_NORESERVE to allocate a page
3398          * without using reserves
3399          */
3400         if (vm_flags & VM_NORESERVE)
3401                 return 0;
3402
3403         /*
3404          * Shared mappings base their reservation on the number of pages that
3405          * are already allocated on behalf of the file. Private mappings need
3406          * to reserve the full area even if read-only, as mprotect() may be
3407          * called to make the mapping read-write. Assume !vma is a shm mapping.
3408          */
3409         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3410                 resv_map = inode_resv_map(inode);
3411
3412                 chg = region_chg(resv_map, from, to);
3413
3414         } else {
3415                 resv_map = resv_map_alloc();
3416                 if (!resv_map)
3417                         return -ENOMEM;
3418
3419                 chg = to - from;
3420
3421                 set_vma_resv_map(vma, resv_map);
3422                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3423         }
3424
3425         if (chg < 0) {
3426                 ret = chg;
3427                 goto out_err;
3428         }
3429
3430         /* There must be enough pages in the subpool for the mapping */
3431         if (hugepage_subpool_get_pages(spool, chg)) {
3432                 ret = -ENOSPC;
3433                 goto out_err;
3434         }
3435
3436         /*
3437          * Check that enough hugepages are available for the reservation.
3438          * Hand the pages back to the subpool if there are not.
3439          */
3440         ret = hugetlb_acct_memory(h, chg);
3441         if (ret < 0) {
3442                 hugepage_subpool_put_pages(spool, chg);
3443                 goto out_err;
3444         }
3445
3446         /*
3447          * Account for the reservations made. Shared mappings record regions
3448          * that have reservations as they are shared by multiple VMAs.
3449          * When the last VMA disappears, the region map says how much
3450          * the reservation was and the page cache tells how much of
3451          * the reservation was consumed. Private mappings are per-VMA and
3452          * only the consumed reservations are tracked. When the VMA
3453          * disappears, the original reservation is the VMA size and the
3454          * consumed reservations are stored in the map. Hence, nothing
3455          * else has to be done for private mappings here
3456          */
3457         if (!vma || vma->vm_flags & VM_MAYSHARE)
3458                 region_add(resv_map, from, to);
3459         return 0;
3460 out_err:
3461         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3462                 kref_put(&resv_map->refs, resv_map_release);
3463         return ret;
3464 }
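/*
 * Worked example (illustrative): a private mmap() of four hugepages
 * takes the "else" branch above, so chg = to - from = 4; a shared
 * mapping instead asks region_chg() how many of those pages are not
 * already reserved in the file's resv_map. Either way the ordering is:
 * charge the subpool first (hugepage_subpool_get_pages), then the
 * global pool (hugetlb_acct_memory), and only commit the region with
 * region_add() once both succeeded, so any failure can be unwound
 * without leaking reservations.
 */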
3465
3466 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3467 {
3468         struct hstate *h = hstate_inode(inode);
3469         struct resv_map *resv_map = inode_resv_map(inode);
3470         long chg = 0;
3471         struct hugepage_subpool *spool = subpool_inode(inode);
3472
3473         if (resv_map)
3474                 chg = region_truncate(resv_map, offset);
3475         spin_lock(&inode->i_lock);
3476         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3477         spin_unlock(&inode->i_lock);
3478
3479         hugepage_subpool_put_pages(spool, (chg - freed));
3480         hugetlb_acct_memory(h, -(chg - freed));
3481 }
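/*
 * Worked example (illustrative): truncating a file that still had 5
 * pages reserved from "offset" onwards (chg = 5) while only 3 of them
 * were instantiated and freed (freed = 3) hands back the 2 reservations
 * that were never consumed (chg - freed = 2); the 3 instantiated pages
 * return their own subpool charge when they are actually freed.
 */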
3482
3483 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3484 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3485                                 struct vm_area_struct *vma,
3486                                 unsigned long addr, pgoff_t idx)
3487 {
3488         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3489                                 svma->vm_start;
3490         unsigned long sbase = saddr & PUD_MASK;
3491         unsigned long s_end = sbase + PUD_SIZE;
3492
3493         /* Allow segments to share if only one is marked locked */
3494         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3495         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3496
3497         /*
3498          * Match the virtual addresses, permissions and the alignment of
3499          * the page table page.
3500          */
3501         if (pmd_index(addr) != pmd_index(saddr) ||
3502             vm_flags != svm_flags ||
3503             sbase < svma->vm_start || svma->vm_end < s_end)
3504                 return 0;
3505
3506         return saddr;
3507 }
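/*
 * Worked example (illustrative, x86_64 where PUD_SIZE is 1GB): for two
 * VMAs mapping the same file, saddr is the address in svma that
 * corresponds to file offset idx. Sharing the PMD page is only safe if
 * addr and saddr fall on the same slot within their 1GB windows
 * (pmd_index match), the flags agree modulo VM_LOCKED, and svma covers
 * its whole window [sbase, s_end); otherwise 0 ("not shareable") is
 * returned.
 */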
3508
3509 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3510 {
3511         unsigned long base = addr & PUD_MASK;
3512         unsigned long end = base + PUD_SIZE;
3513
3514         /*
3515          * Check for proper vm_flags and page table alignment.
3516          */
3517         if (vma->vm_flags & VM_MAYSHARE &&
3518             vma->vm_start <= base && end <= vma->vm_end)
3519                 return 1;
3520         return 0;
3521 }
3522
3523 /*
3524  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3525  * and returns the corresponding pte. While this is not necessary for the
3526  * !shared pmd case because we can allocate the pmd later as well, it makes the
3527  * code much cleaner. pmd allocation is essential for the shared case because
3528  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3529  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3530  * bad pmd for sharing.
3531  */
3532 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3533 {
3534         struct vm_area_struct *vma = find_vma(mm, addr);
3535         struct address_space *mapping = vma->vm_file->f_mapping;
3536         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3537                         vma->vm_pgoff;
3538         struct vm_area_struct *svma;
3539         unsigned long saddr;
3540         pte_t *spte = NULL;
3541         pte_t *pte;
3542         spinlock_t *ptl;
3543
3544         if (!vma_shareable(vma, addr))
3545                 return (pte_t *)pmd_alloc(mm, pud, addr);
3546
3547         i_mmap_lock_write(mapping);
3548         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3549                 if (svma == vma)
3550                         continue;
3551
3552                 saddr = page_table_shareable(svma, vma, addr, idx);
3553                 if (saddr) {
3554                         spte = huge_pte_offset(svma->vm_mm, saddr);
3555                         if (spte) {
3556                                 get_page(virt_to_page(spte));
3557                                 break;
3558                         }
3559                 }
3560         }
3561
3562         if (!spte)
3563                 goto out;
3564
3565         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3566         spin_lock(ptl);
3567         if (pud_none(*pud))
3568                 pud_populate(mm, pud,
3569                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3570         else
3571                 put_page(virt_to_page(spte));
3572         spin_unlock(ptl);
3573 out:
3574         pte = (pte_t *)pmd_alloc(mm, pud, addr);
3575         i_mmap_unlock_write(mapping);
3576         return pte;
3577 }
3578
3579 /*
3580  * Unmap a huge page backed by a shared pte.
3581  *
3582  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
3583  * shared, indicated by page_count > 1, the unmap is achieved by clearing the
3584  * pud and decrementing the refcount. If count == 1, the pte page is not shared.
3585  *
3586  * Called with the page table lock held.
3587  *
3588  * Returns: 1 if a shared pte page was successfully unmapped
3589  *          0 if the underlying pte page is not shared or it is the last user
3590  */
3591 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3592 {
3593         pgd_t *pgd = pgd_offset(mm, *addr);
3594         pud_t *pud = pud_offset(pgd, *addr);
3595
3596         BUG_ON(page_count(virt_to_page(ptep)) == 0);
3597         if (page_count(virt_to_page(ptep)) == 1)
3598                 return 0;
3599
3600         pud_clear(pud);
3601         put_page(virt_to_page(ptep));
3602         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3603         return 1;
3604 }
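/*
 * Worked example of the *addr adjustment above (illustrative, x86_64
 * with 2MB pages, where HPAGE_SIZE * PTRS_PER_PTE == 1GB): callers walk
 * the range with "address += huge_page_size(h)". If the shared PMD page
 * covering [1GB, 2GB) is unshared at *addr == 1GB + 4MB, then
 *
 *	*addr = ALIGN(*addr, 1GB) - HPAGE_SIZE == 2GB - 2MB
 *
 * so the caller's next "+= 2MB" step resumes the walk at 2GB, just past
 * the region whose mappings were all dropped at once by pud_clear().
 */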
3605 #define want_pmd_share()        (1)
3606 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3607 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3608 {
3609         return NULL;
3610 }
3611 #define want_pmd_share()        (0)
3612 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3613
3614 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3615 pte_t *huge_pte_alloc(struct mm_struct *mm,
3616                         unsigned long addr, unsigned long sz)
3617 {
3618         pgd_t *pgd;
3619         pud_t *pud;
3620         pte_t *pte = NULL;
3621
3622         pgd = pgd_offset(mm, addr);
3623         pud = pud_alloc(mm, pgd, addr);
3624         if (pud) {
3625                 if (sz == PUD_SIZE) {
3626                         pte = (pte_t *)pud;
3627                 } else {
3628                         BUG_ON(sz != PMD_SIZE);
3629                         if (want_pmd_share() && pud_none(*pud))
3630                                 pte = huge_pmd_share(mm, addr, pud);
3631                         else
3632                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3633                 }
3634         }
3635         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3636
3637         return pte;
3638 }
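/*
 * Usage sketch (illustrative): the 2MB fault path does roughly
 *
 *	pte_t *ptep = huge_pte_alloc(mm, address, PMD_SIZE);
 *	if (!ptep)
 *		return VM_FAULT_OOM;
 *
 * mirroring the call in hugetlb_fault() above, where sz is
 * huge_page_size(h); sz == PUD_SIZE instead returns the pud slot
 * itself cast to pte_t *.
 */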
3639
3640 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3641 {
3642         pgd_t *pgd;
3643         pud_t *pud;
3644         pmd_t *pmd = NULL;
3645
3646         pgd = pgd_offset(mm, addr);
3647         if (pgd_present(*pgd)) {
3648                 pud = pud_offset(pgd, addr);
3649                 if (pud_present(*pud)) {
3650                         if (pud_huge(*pud))
3651                                 return (pte_t *)pud;
3652                         pmd = pmd_offset(pud, addr);
3653                 }
3654         }
3655         return (pte_t *) pmd;
3656 }
3657
3658 struct page *
3659 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3660                 pmd_t *pmd, int write)
3661 {
3662         struct page *page;
3663
3664         page = pte_page(*(pte_t *)pmd);
3665         if (page)
3666                 page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
3667         return page;
3668 }
3669
3670 struct page *
3671 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3672                 pud_t *pud, int write)
3673 {
3674         struct page *page;
3675
3676         page = pte_page(*(pte_t *)pud);
3677         if (page)
3678                 page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
3679         return page;
3680 }
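/*
 * Worked example of the subpage arithmetic above (illustrative, x86_64
 * with a 1GB pud page): for an address 6MB into the huge page,
 * (address & ~PUD_MASK) == 6MB and 6MB >> PAGE_SHIFT == 1536, so the
 * returned struct page is the head page plus 1536, i.e. the 4KB
 * subpage containing the address.
 */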
3681
3682 #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3683
3684 /* Can be overridden by architectures */
3685 struct page * __weak
3686 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3687                pud_t *pud, int write)
3688 {
3689         BUG();
3690         return NULL;
3691 }
3692
3693 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3694
3695 #ifdef CONFIG_MEMORY_FAILURE
3696
3697 /* Should be called with hugetlb_lock held */
3698 static int is_hugepage_on_freelist(struct page *hpage)
3699 {
3700         struct page *page;
3701         struct page *tmp;
3702         struct hstate *h = page_hstate(hpage);
3703         int nid = page_to_nid(hpage);
3704
3705         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3706                 if (page == hpage)
3707                         return 1;
3708         return 0;
3709 }
3710
3711 /*
3712  * This function is called from the memory-failure code.
3713  * Assume the caller holds the page lock of the head page.
3714  */
3715 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3716 {
3717         struct hstate *h = page_hstate(hpage);
3718         int nid = page_to_nid(hpage);
3719         int ret = -EBUSY;
3720
3721         spin_lock(&hugetlb_lock);
3722         if (is_hugepage_on_freelist(hpage)) {
3723                 /*
3724                  * A hwpoisoned hugepage isn't linked to the activelist or
3725                  * freelist, but a dangling hpage->lru can trigger list-debug
3726                  * warnings (this happens when we call unpoison_memory() on
3727                  * it), so let it point to itself with list_del_init().
3728                  */
3729                 list_del_init(&hpage->lru);
3730                 set_page_refcounted(hpage);
3731                 h->free_huge_pages--;
3732                 h->free_huge_pages_node[nid]--;
3733                 ret = 0;
3734         }
3735         spin_unlock(&hugetlb_lock);
3736         return ret;
3737 }
3738 #endif
3739
3740 bool isolate_huge_page(struct page *page, struct list_head *list)
3741 {
3742         VM_BUG_ON_PAGE(!PageHead(page), page);
3743         if (!get_page_unless_zero(page))
3744                 return false;
3745         spin_lock(&hugetlb_lock);
3746         list_move_tail(&page->lru, list);
3747         spin_unlock(&hugetlb_lock);
3748         return true;
3749 }
3750
3751 void putback_active_hugepage(struct page *page)
3752 {
3753         VM_BUG_ON_PAGE(!PageHead(page), page);
3754         spin_lock(&hugetlb_lock);
3755         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3756         spin_unlock(&hugetlb_lock);
3757         put_page(page);
3758 }
3759
3760 bool is_hugepage_active(struct page *page)
3761 {
3762         VM_BUG_ON_PAGE(!PageHuge(page), page);
3763         /*
3764          * This function can be called for a tail page because the caller,
3765          * scan_movable_pages, scans through a given pfn-range which typically
3766          * covers one memory block. In systems using gigantic hugepages (1GB
3767          * on x86_64), a hugepage is larger than a memory block, and we don't
3768          * support migrating such large hugepages for now, so return false
3769          * when called for tail pages.
3770          */
3771         if (PageTail(page))
3772                 return false;
3773         /*
3774          * The refcount of a hwpoisoned hugepage is 1, but it is not active,
3775          * so we should return false for it.
3776          */
3777         if (unlikely(PageHWPoison(page)))
3778                 return false;
3779         return page_count(page) > 0;
3780 }