mm/hugetlb.c: fix error-path memory leak in nr_hugepages_store_common()
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/io.h>
28
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
32
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
40
41 __initdata LIST_HEAD(huge_boot_pages);
42
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
47
48 #define for_each_hstate(h) \
49         for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50
51 /*
52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53  */
54 static DEFINE_SPINLOCK(hugetlb_lock);
55
56 /*
57  * Region tracking -- allows tracking of reservations and instantiated pages
58  *                    across the pages in a mapping.
59  *
60  * The region data structures are protected by a combination of the mmap_sem
61  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
62  * must either hold the mmap_sem for write, or the mmap_sem for read and
63  * the hugetlb_instantiation mutex:
64  *
65  *      down_write(&mm->mmap_sem);
66  * or
67  *      down_read(&mm->mmap_sem);
68  *      mutex_lock(&hugetlb_instantiation_mutex);
69  */
70 struct file_region {
71         struct list_head link;
72         long from;
73         long to;
74 };
75
76 static long region_add(struct list_head *head, long f, long t)
77 {
78         struct file_region *rg, *nrg, *trg;
79
80         /* Locate the region we are either in or before. */
81         list_for_each_entry(rg, head, link)
82                 if (f <= rg->to)
83                         break;
84
85         /* Round our left edge to the current segment if it encloses us. */
86         if (f > rg->from)
87                 f = rg->from;
88
89         /* Check for and consume any regions we now overlap with. */
90         nrg = rg;
91         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
92                 if (&rg->link == head)
93                         break;
94                 if (rg->from > t)
95                         break;
96
97                 /* If this area reaches higher, then extend our area to
98                  * include it completely.  If this is not the first area
99                  * which we intend to reuse, free it. */
100                 if (rg->to > t)
101                         t = rg->to;
102                 if (rg != nrg) {
103                         list_del(&rg->link);
104                         kfree(rg);
105                 }
106         }
107         nrg->from = f;
108         nrg->to = t;
109         return 0;
110 }
111
112 static long region_chg(struct list_head *head, long f, long t)
113 {
114         struct file_region *rg, *nrg;
115         long chg = 0;
116
117         /* Locate the region we are before or in. */
118         list_for_each_entry(rg, head, link)
119                 if (f <= rg->to)
120                         break;
121
122         /* If we are below the current region then a new region is required.
123          * Subtle, allocate a new region at the position but make it zero
124          * size such that we can guarantee to record the reservation. */
125         if (&rg->link == head || t < rg->from) {
126                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
127                 if (!nrg)
128                         return -ENOMEM;
129                 nrg->from = f;
130                 nrg->to   = f;
131                 INIT_LIST_HEAD(&nrg->link);
132                 list_add(&nrg->link, rg->link.prev);
133
134                 return t - f;
135         }
136
137         /* Round our left edge to the current segment if it encloses us. */
138         if (f > rg->from)
139                 f = rg->from;
140         chg = t - f;
141
142         /* Check for and consume any regions we now overlap with. */
143         list_for_each_entry(rg, rg->link.prev, link) {
144                 if (&rg->link == head)
145                         break;
146                 if (rg->from > t)
147                         return chg;
148
149                 /* We overlap with this area, if it extends further than
150                  * us then we must extend ourselves.  Account for its
151                  * existing reservation. */
152                 if (rg->to > t) {
153                         chg += rg->to - t;
154                         t = rg->to;
155                 }
156                 chg -= rg->to - rg->from;
157         }
158         return chg;
159 }
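/*
 * Usage sketch (based on the callers further down in this file):
 * region_chg() and region_add() form a two-phase pair.  The first call
 * returns how many pages the range [f, t) would add to the map and may
 * pre-allocate a zero-sized region so that the later commit cannot fail;
 * the second call records the range for real.  Roughly:
 *
 *	chg = region_chg(&regions, idx, idx + 1);
 *	if (chg < 0)
 *		return chg;		(-ENOMEM)
 *	... charge quota and allocate the huge page ...
 *	region_add(&regions, idx, idx + 1);
 *
 * See vma_needs_reservation() and vma_commit_reservation() below for
 * the real callers.
 */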
160
161 static long region_truncate(struct list_head *head, long end)
162 {
163         struct file_region *rg, *trg;
164         long chg = 0;
165
166         /* Locate the region we are either in or before. */
167         list_for_each_entry(rg, head, link)
168                 if (end <= rg->to)
169                         break;
170         if (&rg->link == head)
171                 return 0;
172
173         /* If we are in the middle of a region then adjust it. */
174         if (end > rg->from) {
175                 chg = rg->to - end;
176                 rg->to = end;
177                 rg = list_entry(rg->link.next, typeof(*rg), link);
178         }
179
180         /* Drop any remaining regions. */
181         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
182                 if (&rg->link == head)
183                         break;
184                 chg += rg->to - rg->from;
185                 list_del(&rg->link);
186                 kfree(rg);
187         }
188         return chg;
189 }
190
191 static long region_count(struct list_head *head, long f, long t)
192 {
193         struct file_region *rg;
194         long chg = 0;
195
196         /* Locate each segment we overlap with, and count that overlap. */
197         list_for_each_entry(rg, head, link) {
198                 long seg_from;
199                 long seg_to;
200
201                 if (rg->to <= f)
202                         continue;
203                 if (rg->from >= t)
204                         break;
205
206                 seg_from = max(rg->from, f);
207                 seg_to = min(rg->to, t);
208
209                 chg += seg_to - seg_from;
210         }
211
212         return chg;
213 }
214
215 /*
216  * Convert the address within this vma to the page offset within
217  * the mapping, in pagecache page units; huge pages here.
218  */
219 static pgoff_t vma_hugecache_offset(struct hstate *h,
220                         struct vm_area_struct *vma, unsigned long address)
221 {
222         return ((address - vma->vm_start) >> huge_page_shift(h)) +
223                         (vma->vm_pgoff >> huge_page_order(h));
224 }
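/*
 * Worked example for vma_hugecache_offset(): with a 2MB hstate
 * (huge_page_shift == 21, huge_page_order == 9), an address 4MB past
 * vm_start in a mapping whose vm_pgoff is 0 yields index
 * (4MB >> 21) + (0 >> 9) == 2, i.e. the third huge page in the file.
 */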
225
226 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
227                                      unsigned long address)
228 {
229         return vma_hugecache_offset(hstate_vma(vma), vma, address);
230 }
231
232 /*
233  * Return the size of the pages allocated when backing a VMA. In the majority
234  * of cases this will be the same size as that used by the page table entries.
235  */
236 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
237 {
238         struct hstate *hstate;
239
240         if (!is_vm_hugetlb_page(vma))
241                 return PAGE_SIZE;
242
243         hstate = hstate_vma(vma);
244
245         return 1UL << (hstate->order + PAGE_SHIFT);
246 }
247 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
248
249 /*
250  * Return the page size being used by the MMU to back a VMA. In the majority
251  * of cases, the page size used by the kernel matches the MMU size. On
252  * architectures where it differs, an architecture-specific version of this
253  * function is required.
254  */
255 #ifndef vma_mmu_pagesize
256 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
257 {
258         return vma_kernel_pagesize(vma);
259 }
260 #endif
261
262 /*
263  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
264  * bits of the reservation map pointer, which are always clear due to
265  * alignment.
266  */
267 #define HPAGE_RESV_OWNER    (1UL << 0)
268 #define HPAGE_RESV_UNMAPPED (1UL << 1)
269 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
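/*
 * Illustration of the encoding above (a sketch, not taken verbatim from
 * the helpers below): a private mapping that owns its reserves ends up
 * storing
 *
 *	vma->vm_private_data = (void *)((unsigned long)resv_map |
 *						HPAGE_RESV_OWNER);
 *
 * vma_resv_map() recovers the pointer by masking with ~HPAGE_RESV_MASK,
 * and is_vma_resv_set() tests the individual flag bits.
 */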
270
271 /*
272  * These helpers are used to track how many pages are reserved for
273  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
274  * is guaranteed to have its future faults succeed.
275  *
276  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
277  * the reserve counters are updated with the hugetlb_lock held. It is safe
278  * to reset the VMA at fork() time as it is not in use yet and there is no
279  * chance of the global counters getting corrupted as a result of the values.
280  *
281  * The private mapping reservation is represented in a subtly different
282  * manner to a shared mapping.  A shared mapping has a region map associated
283  * with the underlying file, this region map represents the backing file
284  * pages which have ever had a reservation assigned; this persists even
285  * after the page is instantiated.  A private mapping has a region map
286  * associated with the original mmap which is attached to all VMAs which
287  * reference it, this region map represents those offsets which have consumed
288  * reservation, i.e. where pages have been instantiated.
289  */
290 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
291 {
292         return (unsigned long)vma->vm_private_data;
293 }
294
295 static void set_vma_private_data(struct vm_area_struct *vma,
296                                                         unsigned long value)
297 {
298         vma->vm_private_data = (void *)value;
299 }
300
301 struct resv_map {
302         struct kref refs;
303         struct list_head regions;
304 };
305
306 static struct resv_map *resv_map_alloc(void)
307 {
308         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
309         if (!resv_map)
310                 return NULL;
311
312         kref_init(&resv_map->refs);
313         INIT_LIST_HEAD(&resv_map->regions);
314
315         return resv_map;
316 }
317
318 static void resv_map_release(struct kref *ref)
319 {
320         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
321
322         /* Clear out any active regions before we release the map. */
323         region_truncate(&resv_map->regions, 0);
324         kfree(resv_map);
325 }
326
327 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
328 {
329         VM_BUG_ON(!is_vm_hugetlb_page(vma));
330         if (!(vma->vm_flags & VM_MAYSHARE))
331                 return (struct resv_map *)(get_vma_private_data(vma) &
332                                                         ~HPAGE_RESV_MASK);
333         return NULL;
334 }
335
336 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
337 {
338         VM_BUG_ON(!is_vm_hugetlb_page(vma));
339         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
340
341         set_vma_private_data(vma, (get_vma_private_data(vma) &
342                                 HPAGE_RESV_MASK) | (unsigned long)map);
343 }
344
345 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
346 {
347         VM_BUG_ON(!is_vm_hugetlb_page(vma));
348         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
349
350         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
351 }
352
353 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
354 {
355         VM_BUG_ON(!is_vm_hugetlb_page(vma));
356
357         return (get_vma_private_data(vma) & flag) != 0;
358 }
359
360 /* Decrement the reserved pages in the hugepage pool by one */
361 static void decrement_hugepage_resv_vma(struct hstate *h,
362                         struct vm_area_struct *vma)
363 {
364         if (vma->vm_flags & VM_NORESERVE)
365                 return;
366
367         if (vma->vm_flags & VM_MAYSHARE) {
368                 /* Shared mappings always use reserves */
369                 h->resv_huge_pages--;
370         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
371                 /*
372                  * Only the process that called mmap() has reserves for
373                  * private mappings.
374                  */
375                 h->resv_huge_pages--;
376         }
377 }
378
379 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
380 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
381 {
382         VM_BUG_ON(!is_vm_hugetlb_page(vma));
383         if (!(vma->vm_flags & VM_MAYSHARE))
384                 vma->vm_private_data = (void *)0;
385 }
386
387 /* Returns true if the VMA has associated reserve pages */
388 static int vma_has_reserves(struct vm_area_struct *vma)
389 {
390         if (vma->vm_flags & VM_MAYSHARE)
391                 return 1;
392         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
393                 return 1;
394         return 0;
395 }
396
397 static void copy_gigantic_page(struct page *dst, struct page *src)
398 {
399         int i;
400         struct hstate *h = page_hstate(src);
401         struct page *dst_base = dst;
402         struct page *src_base = src;
403
404         for (i = 0; i < pages_per_huge_page(h); ) {
405                 cond_resched();
406                 copy_highpage(dst, src);
407
408                 i++;
409                 dst = mem_map_next(dst, dst_base, i);
410                 src = mem_map_next(src, src_base, i);
411         }
412 }
413
414 void copy_huge_page(struct page *dst, struct page *src)
415 {
416         int i;
417         struct hstate *h = page_hstate(src);
418
419         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
420                 copy_gigantic_page(dst, src);
421                 return;
422         }
423
424         might_sleep();
425         for (i = 0; i < pages_per_huge_page(h); i++) {
426                 cond_resched();
427                 copy_highpage(dst + i, src + i);
428         }
429 }
430
431 static void enqueue_huge_page(struct hstate *h, struct page *page)
432 {
433         int nid = page_to_nid(page);
434         list_add(&page->lru, &h->hugepage_freelists[nid]);
435         h->free_huge_pages++;
436         h->free_huge_pages_node[nid]++;
437 }
438
439 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
440 {
441         struct page *page;
442
443         if (list_empty(&h->hugepage_freelists[nid]))
444                 return NULL;
445         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
446         list_del(&page->lru);
447         set_page_refcounted(page);
448         h->free_huge_pages--;
449         h->free_huge_pages_node[nid]--;
450         return page;
451 }
452
453 static struct page *dequeue_huge_page_vma(struct hstate *h,
454                                 struct vm_area_struct *vma,
455                                 unsigned long address, int avoid_reserve)
456 {
457         struct page *page = NULL;
458         struct mempolicy *mpol;
459         nodemask_t *nodemask;
460         struct zonelist *zonelist;
461         struct zone *zone;
462         struct zoneref *z;
463
464         get_mems_allowed();
465         zonelist = huge_zonelist(vma, address,
466                                         htlb_alloc_mask, &mpol, &nodemask);
467         /*
468          * A child process with MAP_PRIVATE mappings created by its parent
469          * has no page reserves. This check ensures that reservations are
470          * not "stolen". The child may still get SIGKILLed
471          */
472         if (!vma_has_reserves(vma) &&
473                         h->free_huge_pages - h->resv_huge_pages == 0)
474                 goto err;
475
476         /* If reserves cannot be used, ensure enough pages are in the pool */
477         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
478                 goto err;
479
480         for_each_zone_zonelist_nodemask(zone, z, zonelist,
481                                                 MAX_NR_ZONES - 1, nodemask) {
482                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
483                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
484                         if (page) {
485                                 if (!avoid_reserve)
486                                         decrement_hugepage_resv_vma(h, vma);
487                                 break;
488                         }
489                 }
490         }
491 err:
492         mpol_cond_put(mpol);
493         put_mems_allowed();
494         return page;
495 }
496
497 static void update_and_free_page(struct hstate *h, struct page *page)
498 {
499         int i;
500
501         VM_BUG_ON(h->order >= MAX_ORDER);
502
503         h->nr_huge_pages--;
504         h->nr_huge_pages_node[page_to_nid(page)]--;
505         for (i = 0; i < pages_per_huge_page(h); i++) {
506                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
507                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
508                                 1 << PG_private | 1<< PG_writeback);
509         }
510         set_compound_page_dtor(page, NULL);
511         set_page_refcounted(page);
512         arch_release_hugepage(page);
513         __free_pages(page, huge_page_order(h));
514 }
515
516 struct hstate *size_to_hstate(unsigned long size)
517 {
518         struct hstate *h;
519
520         for_each_hstate(h) {
521                 if (huge_page_size(h) == size)
522                         return h;
523         }
524         return NULL;
525 }
526
527 static void free_huge_page(struct page *page)
528 {
529         /*
530          * Can't pass hstate in here because it is called from the
531          * compound page destructor.
532          */
533         struct hstate *h = page_hstate(page);
534         int nid = page_to_nid(page);
535         struct address_space *mapping;
536
537         mapping = (struct address_space *) page_private(page);
538         set_page_private(page, 0);
539         page->mapping = NULL;
540         BUG_ON(page_count(page));
541         BUG_ON(page_mapcount(page));
542         INIT_LIST_HEAD(&page->lru);
543
544         spin_lock(&hugetlb_lock);
545         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
546                 update_and_free_page(h, page);
547                 h->surplus_huge_pages--;
548                 h->surplus_huge_pages_node[nid]--;
549         } else {
550                 enqueue_huge_page(h, page);
551         }
552         spin_unlock(&hugetlb_lock);
553         if (mapping)
554                 hugetlb_put_quota(mapping, 1);
555 }
556
557 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
558 {
559         set_compound_page_dtor(page, free_huge_page);
560         spin_lock(&hugetlb_lock);
561         h->nr_huge_pages++;
562         h->nr_huge_pages_node[nid]++;
563         spin_unlock(&hugetlb_lock);
564         put_page(page); /* free it into the hugepage allocator */
565 }
566
567 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
568 {
569         int i;
570         int nr_pages = 1 << order;
571         struct page *p = page + 1;
572
573         /* we rely on prep_new_huge_page to set the destructor */
574         set_compound_order(page, order);
575         __SetPageHead(page);
576         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
577                 __SetPageTail(p);
578                 p->first_page = page;
579         }
580 }
581
582 int PageHuge(struct page *page)
583 {
584         compound_page_dtor *dtor;
585
586         if (!PageCompound(page))
587                 return 0;
588
589         page = compound_head(page);
590         dtor = get_compound_page_dtor(page);
591
592         return dtor == free_huge_page;
593 }
594
595 EXPORT_SYMBOL_GPL(PageHuge);
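/*
 * Note on the helper above: PageHuge() works on head and tail pages
 * alike because compound_head() is applied before the destructor check;
 * a page is considered huge iff its compound destructor is
 * free_huge_page.
 */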
596
597 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
598 {
599         struct page *page;
600
601         if (h->order >= MAX_ORDER)
602                 return NULL;
603
604         page = alloc_pages_exact_node(nid,
605                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
606                                                 __GFP_REPEAT|__GFP_NOWARN,
607                 huge_page_order(h));
608         if (page) {
609                 if (arch_prepare_hugepage(page)) {
610                         __free_pages(page, huge_page_order(h));
611                         return NULL;
612                 }
613                 prep_new_huge_page(h, page, nid);
614         }
615
616         return page;
617 }
618
619 /*
620  * common helper functions for hstate_next_node_to_{alloc|free}.
621  * We may have allocated or freed a huge page based on a different
622  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
623  * be outside of *nodes_allowed.  Ensure that we use an allowed
624  * node for alloc or free.
625  */
626 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
627 {
628         nid = next_node(nid, *nodes_allowed);
629         if (nid == MAX_NUMNODES)
630                 nid = first_node(*nodes_allowed);
631         VM_BUG_ON(nid >= MAX_NUMNODES);
632
633         return nid;
634 }
635
636 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
637 {
638         if (!node_isset(nid, *nodes_allowed))
639                 nid = next_node_allowed(nid, nodes_allowed);
640         return nid;
641 }
642
643 /*
644  * returns the previously saved node ["this node"] from which to
645  * allocate a persistent huge page for the pool and advance the
646  * next node from which to allocate, handling wrap at end of node
647  * mask.
648  */
649 static int hstate_next_node_to_alloc(struct hstate *h,
650                                         nodemask_t *nodes_allowed)
651 {
652         int nid;
653
654         VM_BUG_ON(!nodes_allowed);
655
656         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
657         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
658
659         return nid;
660 }
661
662 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
663 {
664         struct page *page;
665         int start_nid;
666         int next_nid;
667         int ret = 0;
668
669         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
670         next_nid = start_nid;
671
672         do {
673                 page = alloc_fresh_huge_page_node(h, next_nid);
674                 if (page) {
675                         ret = 1;
676                         break;
677                 }
678                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
679         } while (next_nid != start_nid);
680
681         if (ret)
682                 count_vm_event(HTLB_BUDDY_PGALLOC);
683         else
684                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
685
686         return ret;
687 }
688
689 /*
690  * helper for free_pool_huge_page() - return the previously saved
691  * node ["this node"] from which to free a huge page.  Advance the
692  * next node id whether or not we find a free huge page to free so
693  * that the next attempt to free addresses the next node.
694  */
695 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
696 {
697         int nid;
698
699         VM_BUG_ON(!nodes_allowed);
700
701         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
702         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
703
704         return nid;
705 }
706
707 /*
708  * Free huge page from pool from next node to free.
709  * Attempt to keep persistent huge pages more or less
710  * balanced over allowed nodes.
711  * Called with hugetlb_lock locked.
712  */
713 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
714                                                          bool acct_surplus)
715 {
716         int start_nid;
717         int next_nid;
718         int ret = 0;
719
720         start_nid = hstate_next_node_to_free(h, nodes_allowed);
721         next_nid = start_nid;
722
723         do {
724                 /*
725                  * If we're returning unused surplus pages, only examine
726                  * nodes with surplus pages.
727                  */
728                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
729                     !list_empty(&h->hugepage_freelists[next_nid])) {
730                         struct page *page =
731                                 list_entry(h->hugepage_freelists[next_nid].next,
732                                           struct page, lru);
733                         list_del(&page->lru);
734                         h->free_huge_pages--;
735                         h->free_huge_pages_node[next_nid]--;
736                         if (acct_surplus) {
737                                 h->surplus_huge_pages--;
738                                 h->surplus_huge_pages_node[next_nid]--;
739                         }
740                         update_and_free_page(h, page);
741                         ret = 1;
742                         break;
743                 }
744                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
745         } while (next_nid != start_nid);
746
747         return ret;
748 }
749
750 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
751 {
752         struct page *page;
753         unsigned int r_nid;
754
755         if (h->order >= MAX_ORDER)
756                 return NULL;
757
758         /*
759          * Assume we will successfully allocate the surplus page to
760          * prevent racing processes from causing the surplus to exceed
761          * overcommit
762          *
763          * This however introduces a different race, where a process B
764          * tries to grow the static hugepage pool while alloc_pages() is
765          * called by process A. B will only examine the per-node
766          * counters in determining if surplus huge pages can be
767          * converted to normal huge pages in adjust_pool_surplus(). A
768          * won't be able to increment the per-node counter, until the
769          * lock is dropped by B, but B doesn't drop hugetlb_lock until
770          * no more huge pages can be converted from surplus to normal
771          * state (and doesn't try to convert again). Thus, we have a
772          * case where a surplus huge page exists, the pool is grown, and
773          * the surplus huge page still exists after, even though it
774          * should just have been converted to a normal huge page. This
775          * does not leak memory, though, as the hugepage will be freed
776          * once it is out of use. It also does not allow the counters to
777          * go out of whack in adjust_pool_surplus() as we don't modify
778          * the node values until we've gotten the hugepage and only the
779          * per-node value is checked there.
780          */
781         spin_lock(&hugetlb_lock);
782         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
783                 spin_unlock(&hugetlb_lock);
784                 return NULL;
785         } else {
786                 h->nr_huge_pages++;
787                 h->surplus_huge_pages++;
788         }
789         spin_unlock(&hugetlb_lock);
790
791         if (nid == NUMA_NO_NODE)
792                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
793                                    __GFP_REPEAT|__GFP_NOWARN,
794                                    huge_page_order(h));
795         else
796                 page = alloc_pages_exact_node(nid,
797                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
798                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
799
800         if (page && arch_prepare_hugepage(page)) {
801                 __free_pages(page, huge_page_order(h));
802                 return NULL;
803         }
804
805         spin_lock(&hugetlb_lock);
806         if (page) {
807                 r_nid = page_to_nid(page);
808                 set_compound_page_dtor(page, free_huge_page);
809                 /*
810                  * We incremented the global counters already
811                  */
812                 h->nr_huge_pages_node[r_nid]++;
813                 h->surplus_huge_pages_node[r_nid]++;
814                 __count_vm_event(HTLB_BUDDY_PGALLOC);
815         } else {
816                 h->nr_huge_pages--;
817                 h->surplus_huge_pages--;
818                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
819         }
820         spin_unlock(&hugetlb_lock);
821
822         return page;
823 }
824
825 /*
826  * This allocation function is useful in the context where vma is irrelevant.
827  * E.g. soft-offlining uses this function because it only cares about the
828  * physical address of the error page.
829  */
830 struct page *alloc_huge_page_node(struct hstate *h, int nid)
831 {
832         struct page *page;
833
834         spin_lock(&hugetlb_lock);
835         page = dequeue_huge_page_node(h, nid);
836         spin_unlock(&hugetlb_lock);
837
838         if (!page)
839                 page = alloc_buddy_huge_page(h, nid);
840
841         return page;
842 }
843
844 /*
845  * Increase the hugetlb pool such that it can accommodate a reservation
846  * of size 'delta'.
847  */
848 static int gather_surplus_pages(struct hstate *h, int delta)
849 {
850         struct list_head surplus_list;
851         struct page *page, *tmp;
852         int ret, i;
853         int needed, allocated;
854
855         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
856         if (needed <= 0) {
857                 h->resv_huge_pages += delta;
858                 return 0;
859         }
860
861         allocated = 0;
862         INIT_LIST_HEAD(&surplus_list);
863
864         ret = -ENOMEM;
865 retry:
866         spin_unlock(&hugetlb_lock);
867         for (i = 0; i < needed; i++) {
868                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
869                 if (!page)
870                         /*
871                          * We were not able to allocate enough pages to
872                          * satisfy the entire reservation so we free what
873                          * we've allocated so far.
874                          */
875                         goto free;
876
877                 list_add(&page->lru, &surplus_list);
878         }
879         allocated += needed;
880
881         /*
882          * After retaking hugetlb_lock, we need to recalculate 'needed'
883          * because either resv_huge_pages or free_huge_pages may have changed.
884          */
885         spin_lock(&hugetlb_lock);
886         needed = (h->resv_huge_pages + delta) -
887                         (h->free_huge_pages + allocated);
888         if (needed > 0)
889                 goto retry;
890
891         /*
892          * The surplus_list now contains _at_least_ the number of extra pages
893          * needed to accommodate the reservation.  Add the appropriate number
894          * of pages to the hugetlb pool and free the extras back to the buddy
895          * allocator.  Commit the entire reservation here to prevent another
896          * process from stealing the pages as they are added to the pool but
897          * before they are reserved.
898          */
899         needed += allocated;
900         h->resv_huge_pages += delta;
901         ret = 0;
902
903         spin_unlock(&hugetlb_lock);
904         /* Free the needed pages to the hugetlb pool */
905         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
906                 if ((--needed) < 0)
907                         break;
908                 list_del(&page->lru);
909                 /*
910                  * This page is now managed by the hugetlb allocator and has
911                  * no users -- drop the buddy allocator's reference.
912                  */
913                 put_page_testzero(page);
914                 VM_BUG_ON(page_count(page));
915                 enqueue_huge_page(h, page);
916         }
917
918         /* Free unnecessary surplus pages to the buddy allocator */
919 free:
920         if (!list_empty(&surplus_list)) {
921                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
922                         list_del(&page->lru);
923                         put_page(page);
924                 }
925         }
926         spin_lock(&hugetlb_lock);
927
928         return ret;
929 }
930
931 /*
932  * When releasing a hugetlb pool reservation, any surplus pages that were
933  * allocated to satisfy the reservation must be explicitly freed if they were
934  * never used.
935  * Called with hugetlb_lock held.
936  */
937 static void return_unused_surplus_pages(struct hstate *h,
938                                         unsigned long unused_resv_pages)
939 {
940         unsigned long nr_pages;
941
942         /* Uncommit the reservation */
943         h->resv_huge_pages -= unused_resv_pages;
944
945         /* Cannot return gigantic pages currently */
946         if (h->order >= MAX_ORDER)
947                 return;
948
949         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
950
951         /*
952          * We want to release as many surplus pages as possible, spread
953          * evenly across all nodes with memory. Iterate across these nodes
954          * until we can no longer free unreserved surplus pages. This occurs
955          * when the nodes with surplus pages have no free pages.
956          * free_pool_huge_page() will balance the freed pages across the
957          * on-line nodes with memory and will handle the hstate accounting.
958          */
959         while (nr_pages--) {
960                 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
961                         break;
962         }
963 }
964
965 /*
966  * Determine if the huge page at addr within the vma has an associated
967  * reservation.  Where it does not we will need to logically increase
968  * reservation and actually increase quota before an allocation can occur.
969  * Where any new reservation would be required the reservation change is
970  * prepared, but not committed.  Once the page has been quota'd, allocated
971  * and instantiated, the change should be committed via vma_commit_reservation.
972  * No action is required on failure.
973  */
974 static long vma_needs_reservation(struct hstate *h,
975                         struct vm_area_struct *vma, unsigned long addr)
976 {
977         struct address_space *mapping = vma->vm_file->f_mapping;
978         struct inode *inode = mapping->host;
979
980         if (vma->vm_flags & VM_MAYSHARE) {
981                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
982                 return region_chg(&inode->i_mapping->private_list,
983                                                         idx, idx + 1);
984
985         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
986                 return 1;
987
988         } else  {
989                 long err;
990                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
991                 struct resv_map *reservations = vma_resv_map(vma);
992
993                 err = region_chg(&reservations->regions, idx, idx + 1);
994                 if (err < 0)
995                         return err;
996                 return 0;
997         }
998 }
999 static void vma_commit_reservation(struct hstate *h,
1000                         struct vm_area_struct *vma, unsigned long addr)
1001 {
1002         struct address_space *mapping = vma->vm_file->f_mapping;
1003         struct inode *inode = mapping->host;
1004
1005         if (vma->vm_flags & VM_MAYSHARE) {
1006                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1007                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1008
1009         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1010                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1011                 struct resv_map *reservations = vma_resv_map(vma);
1012
1013                 /* Mark this page used in the map. */
1014                 region_add(&reservations->regions, idx, idx + 1);
1015         }
1016 }
1017
1018 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1019                                     unsigned long addr, int avoid_reserve)
1020 {
1021         struct hstate *h = hstate_vma(vma);
1022         struct page *page;
1023         struct address_space *mapping = vma->vm_file->f_mapping;
1024         struct inode *inode = mapping->host;
1025         long chg;
1026
1027         /*
1028          * Processes that did not create the mapping will have no reserves and
1029          * will not have accounted against quota. Check that the quota can be
1030          * made before satisfying the allocation.
1031          * MAP_NORESERVE mappings may also need pages and quota allocated
1032          * if no reserve mapping overlaps.
1033          */
1034         chg = vma_needs_reservation(h, vma, addr);
1035         if (chg < 0)
1036                 return ERR_PTR(chg);
1037         if (chg)
1038                 if (hugetlb_get_quota(inode->i_mapping, chg))
1039                         return ERR_PTR(-ENOSPC);
1040
1041         spin_lock(&hugetlb_lock);
1042         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1043         spin_unlock(&hugetlb_lock);
1044
1045         if (!page) {
1046                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1047                 if (!page) {
1048                         hugetlb_put_quota(inode->i_mapping, chg);
1049                         return ERR_PTR(-VM_FAULT_SIGBUS);
1050                 }
1051         }
1052
1053         set_page_private(page, (unsigned long) mapping);
1054
1055         vma_commit_reservation(h, vma, addr);
1056
1057         return page;
1058 }
1059
1060 int __weak alloc_bootmem_huge_page(struct hstate *h)
1061 {
1062         struct huge_bootmem_page *m;
1063         int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1064
1065         while (nr_nodes) {
1066                 void *addr;
1067
1068                 addr = __alloc_bootmem_node_nopanic(
1069                                 NODE_DATA(hstate_next_node_to_alloc(h,
1070                                                 &node_states[N_HIGH_MEMORY])),
1071                                 huge_page_size(h), huge_page_size(h), 0);
1072
1073                 if (addr) {
1074                         /*
1075                          * Use the beginning of the huge page to store the
1076                          * huge_bootmem_page struct (until gather_bootmem
1077                          * puts them into the mem_map).
1078                          */
1079                         m = addr;
1080                         goto found;
1081                 }
1082                 nr_nodes--;
1083         }
1084         return 0;
1085
1086 found:
1087         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1088         /* Put them into a private list first because mem_map is not up yet */
1089         list_add(&m->list, &huge_boot_pages);
1090         m->hstate = h;
1091         return 1;
1092 }
1093
1094 static void prep_compound_huge_page(struct page *page, int order)
1095 {
1096         if (unlikely(order > (MAX_ORDER - 1)))
1097                 prep_compound_gigantic_page(page, order);
1098         else
1099                 prep_compound_page(page, order);
1100 }
1101
1102 /* Put bootmem huge pages into the standard lists after mem_map is up */
1103 static void __init gather_bootmem_prealloc(void)
1104 {
1105         struct huge_bootmem_page *m;
1106
1107         list_for_each_entry(m, &huge_boot_pages, list) {
1108                 struct page *page = virt_to_page(m);
1109                 struct hstate *h = m->hstate;
1110                 __ClearPageReserved(page);
1111                 WARN_ON(page_count(page) != 1);
1112                 prep_compound_huge_page(page, h->order);
1113                 prep_new_huge_page(h, page, page_to_nid(page));
1114         }
1115 }
1116
1117 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1118 {
1119         unsigned long i;
1120
1121         for (i = 0; i < h->max_huge_pages; ++i) {
1122                 if (h->order >= MAX_ORDER) {
1123                         if (!alloc_bootmem_huge_page(h))
1124                                 break;
1125                 } else if (!alloc_fresh_huge_page(h,
1126                                          &node_states[N_HIGH_MEMORY]))
1127                         break;
1128         }
1129         h->max_huge_pages = i;
1130 }
1131
1132 static void __init hugetlb_init_hstates(void)
1133 {
1134         struct hstate *h;
1135
1136         for_each_hstate(h) {
1137                 /* oversize hugepages were init'ed in early boot */
1138                 if (h->order < MAX_ORDER)
1139                         hugetlb_hstate_alloc_pages(h);
1140         }
1141 }
1142
1143 static char * __init memfmt(char *buf, unsigned long n)
1144 {
1145         if (n >= (1UL << 30))
1146                 sprintf(buf, "%lu GB", n >> 30);
1147         else if (n >= (1UL << 20))
1148                 sprintf(buf, "%lu MB", n >> 20);
1149         else
1150                 sprintf(buf, "%lu KB", n >> 10);
1151         return buf;
1152 }
1153
1154 static void __init report_hugepages(void)
1155 {
1156         struct hstate *h;
1157
1158         for_each_hstate(h) {
1159                 char buf[32];
1160                 printk(KERN_INFO "HugeTLB registered %s page size, "
1161                                  "pre-allocated %ld pages\n",
1162                         memfmt(buf, huge_page_size(h)),
1163                         h->free_huge_pages);
1164         }
1165 }
1166
1167 #ifdef CONFIG_HIGHMEM
1168 static void try_to_free_low(struct hstate *h, unsigned long count,
1169                                                 nodemask_t *nodes_allowed)
1170 {
1171         int i;
1172
1173         if (h->order >= MAX_ORDER)
1174                 return;
1175
1176         for_each_node_mask(i, *nodes_allowed) {
1177                 struct page *page, *next;
1178                 struct list_head *freel = &h->hugepage_freelists[i];
1179                 list_for_each_entry_safe(page, next, freel, lru) {
1180                         if (count >= h->nr_huge_pages)
1181                                 return;
1182                         if (PageHighMem(page))
1183                                 continue;
1184                         list_del(&page->lru);
1185                         update_and_free_page(h, page);
1186                         h->free_huge_pages--;
1187                         h->free_huge_pages_node[page_to_nid(page)]--;
1188                 }
1189         }
1190 }
1191 #else
1192 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1193                                                 nodemask_t *nodes_allowed)
1194 {
1195 }
1196 #endif
1197
1198 /*
1199  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1200  * balanced by operating on them in a round-robin fashion.
1201  * Returns 1 if an adjustment was made.
1202  */
1203 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1204                                 int delta)
1205 {
1206         int start_nid, next_nid;
1207         int ret = 0;
1208
1209         VM_BUG_ON(delta != -1 && delta != 1);
1210
1211         if (delta < 0)
1212                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1213         else
1214                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1215         next_nid = start_nid;
1216
1217         do {
1218                 int nid = next_nid;
1219                 if (delta < 0)  {
1220                         /*
1221                          * To shrink on this node, there must be a surplus page
1222                          */
1223                         if (!h->surplus_huge_pages_node[nid]) {
1224                                 next_nid = hstate_next_node_to_alloc(h,
1225                                                                 nodes_allowed);
1226                                 continue;
1227                         }
1228                 }
1229                 if (delta > 0) {
1230                         /*
1231                          * Surplus cannot exceed the total number of pages
1232                          */
1233                         if (h->surplus_huge_pages_node[nid] >=
1234                                                 h->nr_huge_pages_node[nid]) {
1235                                 next_nid = hstate_next_node_to_free(h,
1236                                                                 nodes_allowed);
1237                                 continue;
1238                         }
1239                 }
1240
1241                 h->surplus_huge_pages += delta;
1242                 h->surplus_huge_pages_node[nid] += delta;
1243                 ret = 1;
1244                 break;
1245         } while (next_nid != start_nid);
1246
1247         return ret;
1248 }
1249
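/* "persistent" pages are pool pages that are not surplus (overcommit) pages */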
1250 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1251 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1252                                                 nodemask_t *nodes_allowed)
1253 {
1254         unsigned long min_count, ret;
1255
1256         if (h->order >= MAX_ORDER)
1257                 return h->max_huge_pages;
1258
1259         /*
1260          * Increase the pool size
1261          * First take pages out of surplus state.  Then make up the
1262          * remaining difference by allocating fresh huge pages.
1263          *
1264          * We might race with alloc_buddy_huge_page() here and be unable
1265          * to convert a surplus huge page to a normal huge page. That is
1266          * not critical, though, it just means the overall size of the
1267          * pool might be one hugepage larger than it needs to be, but
1268          * within all the constraints specified by the sysctls.
1269          */
1270         spin_lock(&hugetlb_lock);
1271         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1272                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1273                         break;
1274         }
1275
1276         while (count > persistent_huge_pages(h)) {
1277                 /*
1278                  * If this allocation races such that we no longer need the
1279                  * page, free_huge_page will handle it by freeing the page
1280                  * and reducing the surplus.
1281                  */
1282                 spin_unlock(&hugetlb_lock);
1283                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1284                 spin_lock(&hugetlb_lock);
1285                 if (!ret)
1286                         goto out;
1287
1288                 /* Bail for signals. Probably ctrl-c from user */
1289                 if (signal_pending(current))
1290                         goto out;
1291         }
1292
1293         /*
1294          * Decrease the pool size
1295          * First return free pages to the buddy allocator (being careful
1296          * to keep enough around to satisfy reservations).  Then place
1297          * pages into surplus state as needed so the pool will shrink
1298          * to the desired size as pages become free.
1299          *
1300          * By placing pages into the surplus state independent of the
1301          * overcommit value, we are allowing the surplus pool size to
1302          * exceed overcommit. There are few sane options here. Since
1303          * alloc_buddy_huge_page() is checking the global counter,
1304          * though, we'll note that we're not allowed to exceed surplus
1305          * and won't grow the pool anywhere else. Not until one of the
1306          * sysctls are changed, or the surplus pages go out of use.
1307          */
1308         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1309         min_count = max(count, min_count);
1310         try_to_free_low(h, min_count, nodes_allowed);
1311         while (min_count < persistent_huge_pages(h)) {
1312                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1313                         break;
1314         }
1315         while (count < persistent_huge_pages(h)) {
1316                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1317                         break;
1318         }
1319 out:
1320         ret = persistent_huge_pages(h);
1321         spin_unlock(&hugetlb_lock);
1322         return ret;
1323 }
1324
1325 #define HSTATE_ATTR_RO(_name) \
1326         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1327
1328 #define HSTATE_ATTR(_name) \
1329         static struct kobj_attribute _name##_attr = \
1330                 __ATTR(_name, 0644, _name##_show, _name##_store)
1331
1332 static struct kobject *hugepages_kobj;
1333 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1334
1335 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1336
1337 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1338 {
1339         int i;
1340
1341         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1342                 if (hstate_kobjs[i] == kobj) {
1343                         if (nidp)
1344                                 *nidp = NUMA_NO_NODE;
1345                         return &hstates[i];
1346                 }
1347
1348         return kobj_to_node_hstate(kobj, nidp);
1349 }
1350
1351 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1352                                         struct kobj_attribute *attr, char *buf)
1353 {
1354         struct hstate *h;
1355         unsigned long nr_huge_pages;
1356         int nid;
1357
1358         h = kobj_to_hstate(kobj, &nid);
1359         if (nid == NUMA_NO_NODE)
1360                 nr_huge_pages = h->nr_huge_pages;
1361         else
1362                 nr_huge_pages = h->nr_huge_pages_node[nid];
1363
1364         return sprintf(buf, "%lu\n", nr_huge_pages);
1365 }
1366 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1367                         struct kobject *kobj, struct kobj_attribute *attr,
1368                         const char *buf, size_t len)
1369 {
1370         int err;
1371         int nid;
1372         unsigned long count;
1373         struct hstate *h;
1374         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1375
1376         err = strict_strtoul(buf, 10, &count);
1377         if (err) {
1378                 NODEMASK_FREE(nodes_allowed);
1379                 return 0;
1380         }
1381
1382         h = kobj_to_hstate(kobj, &nid);
1383         if (nid == NUMA_NO_NODE) {
1384                 /*
1385                  * global hstate attribute
1386                  */
1387                 if (!(obey_mempolicy &&
1388                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1389                         NODEMASK_FREE(nodes_allowed);
1390                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1391                 }
1392         } else if (nodes_allowed) {
1393                 /*
1394                  * per node hstate attribute: adjust count to global,
1395                  * but restrict alloc/free to the specified node.
1396                  */
1397                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1398                 init_nodemask_of_node(nodes_allowed, nid);
1399         } else
1400                 nodes_allowed = &node_states[N_HIGH_MEMORY];
1401
1402         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1403
1404         if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1405                 NODEMASK_FREE(nodes_allowed);
1406
1407         return len;
1408 }
1409
1410 static ssize_t nr_hugepages_show(struct kobject *kobj,
1411                                        struct kobj_attribute *attr, char *buf)
1412 {
1413         return nr_hugepages_show_common(kobj, attr, buf);
1414 }
1415
1416 static ssize_t nr_hugepages_store(struct kobject *kobj,
1417                struct kobj_attribute *attr, const char *buf, size_t len)
1418 {
1419         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1420 }
1421 HSTATE_ATTR(nr_hugepages);
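/*
 * Example of how the attribute above is reached from userspace (the
 * directory name comes from h->name, e.g. "hugepages-2048kB"):
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * which ends up in nr_hugepages_store() -> nr_hugepages_store_common()
 * with obey_mempolicy == false.
 */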
1422
1423 #ifdef CONFIG_NUMA
1424
1425 /*
1426  * hstate attribute for optionally mempolicy-based constraint on persistent
1427  * huge page alloc/free.
1428  */
1429 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1430                                        struct kobj_attribute *attr, char *buf)
1431 {
1432         return nr_hugepages_show_common(kobj, attr, buf);
1433 }
1434
1435 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1436                struct kobj_attribute *attr, const char *buf, size_t len)
1437 {
1438         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1439 }
1440 HSTATE_ATTR(nr_hugepages_mempolicy);
1441 #endif
1442
1443
1444 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1445                                         struct kobj_attribute *attr, char *buf)
1446 {
1447         struct hstate *h = kobj_to_hstate(kobj, NULL);
1448         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1449 }
1450 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1451                 struct kobj_attribute *attr, const char *buf, size_t count)
1452 {
1453         int err;
1454         unsigned long input;
1455         struct hstate *h = kobj_to_hstate(kobj, NULL);
1456
1457         err = strict_strtoul(buf, 10, &input);
1458         if (err)
1459                 return 0;
1460
1461         spin_lock(&hugetlb_lock);
1462         h->nr_overcommit_huge_pages = input;
1463         spin_unlock(&hugetlb_lock);
1464
1465         return count;
1466 }
1467 HSTATE_ATTR(nr_overcommit_hugepages);
1468
1469 static ssize_t free_hugepages_show(struct kobject *kobj,
1470                                         struct kobj_attribute *attr, char *buf)
1471 {
1472         struct hstate *h;
1473         unsigned long free_huge_pages;
1474         int nid;
1475
1476         h = kobj_to_hstate(kobj, &nid);
1477         if (nid == NUMA_NO_NODE)
1478                 free_huge_pages = h->free_huge_pages;
1479         else
1480                 free_huge_pages = h->free_huge_pages_node[nid];
1481
1482         return sprintf(buf, "%lu\n", free_huge_pages);
1483 }
1484 HSTATE_ATTR_RO(free_hugepages);
1485
1486 static ssize_t resv_hugepages_show(struct kobject *kobj,
1487                                         struct kobj_attribute *attr, char *buf)
1488 {
1489         struct hstate *h = kobj_to_hstate(kobj, NULL);
1490         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1491 }
1492 HSTATE_ATTR_RO(resv_hugepages);
1493
1494 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1495                                         struct kobj_attribute *attr, char *buf)
1496 {
1497         struct hstate *h;
1498         unsigned long surplus_huge_pages;
1499         int nid;
1500
1501         h = kobj_to_hstate(kobj, &nid);
1502         if (nid == NUMA_NO_NODE)
1503                 surplus_huge_pages = h->surplus_huge_pages;
1504         else
1505                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1506
1507         return sprintf(buf, "%lu\n", surplus_huge_pages);
1508 }
1509 HSTATE_ATTR_RO(surplus_hugepages);
1510
1511 static struct attribute *hstate_attrs[] = {
1512         &nr_hugepages_attr.attr,
1513         &nr_overcommit_hugepages_attr.attr,
1514         &free_hugepages_attr.attr,
1515         &resv_hugepages_attr.attr,
1516         &surplus_hugepages_attr.attr,
1517 #ifdef CONFIG_NUMA
1518         &nr_hugepages_mempolicy_attr.attr,
1519 #endif
1520         NULL,
1521 };
1522
1523 static struct attribute_group hstate_attr_group = {
1524         .attrs = hstate_attrs,
1525 };
1526
1527 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1528                                     struct kobject **hstate_kobjs,
1529                                     struct attribute_group *hstate_attr_group)
1530 {
1531         int retval;
1532         int hi = h - hstates;
1533
1534         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1535         if (!hstate_kobjs[hi])
1536                 return -ENOMEM;
1537
1538         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1539         if (retval)
1540                 kobject_put(hstate_kobjs[hi]);
1541
1542         return retval;
1543 }
1544
1545 static void __init hugetlb_sysfs_init(void)
1546 {
1547         struct hstate *h;
1548         int err;
1549
1550         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1551         if (!hugepages_kobj)
1552                 return;
1553
1554         for_each_hstate(h) {
1555                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1556                                          hstate_kobjs, &hstate_attr_group);
1557                 if (err)
1558                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1559                                                                 h->name);
1560         }
1561 }
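/*
 * A rough sketch of the layout the above creates, assuming an x86-style
 * 2MB default huge page size (directory names follow h->name):
 *
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *
 * plus nr_hugepages_mempolicy on CONFIG_NUMA builds, e.g.:
 *
 *   # echo 20 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */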
1562
1563 #ifdef CONFIG_NUMA
1564
1565 /*
1566  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1567  * with node sysdevs in node_devices[] using a parallel array.  The array
1568  * index of a node sysdev or _hstate == node id.
1569  * This is here to avoid any static dependency of the node sysdev driver, in
1570  * the base kernel, on the hugetlb module.
1571  */
1572 struct node_hstate {
1573         struct kobject          *hugepages_kobj;
1574         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1575 };
1576 struct node_hstate node_hstates[MAX_NUMNODES];
1577
1578 /*
1579  * A subset of global hstate attributes for node sysdevs
1580  */
1581 static struct attribute *per_node_hstate_attrs[] = {
1582         &nr_hugepages_attr.attr,
1583         &free_hugepages_attr.attr,
1584         &surplus_hugepages_attr.attr,
1585         NULL,
1586 };
1587
1588 static struct attribute_group per_node_hstate_attr_group = {
1589         .attrs = per_node_hstate_attrs,
1590 };
1591
1592 /*
1593  * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1594  * Returns node id via non-NULL nidp.
1595  */
1596 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1597 {
1598         int nid;
1599
1600         for (nid = 0; nid < nr_node_ids; nid++) {
1601                 struct node_hstate *nhs = &node_hstates[nid];
1602                 int i;
1603                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1604                         if (nhs->hstate_kobjs[i] == kobj) {
1605                                 if (nidp)
1606                                         *nidp = nid;
1607                                 return &hstates[i];
1608                         }
1609         }
1610
1611         BUG();
1612         return NULL;
1613 }
1614
1615 /*
1616  * Unregister hstate attributes from a single node sysdev.
1617  * No-op if no hstate attributes attached.
1618  */
1619 void hugetlb_unregister_node(struct node *node)
1620 {
1621         struct hstate *h;
1622         struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1623
1624         if (!nhs->hugepages_kobj)
1625                 return;         /* no hstate attributes */
1626
1627         for_each_hstate(h)
1628                 if (nhs->hstate_kobjs[h - hstates]) {
1629                         kobject_put(nhs->hstate_kobjs[h - hstates]);
1630                         nhs->hstate_kobjs[h - hstates] = NULL;
1631                 }
1632
1633         kobject_put(nhs->hugepages_kobj);
1634         nhs->hugepages_kobj = NULL;
1635 }
1636
1637 /*
1638  * hugetlb module exit:  unregister hstate attributes from node sysdevs
1639  * that have them.
1640  */
1641 static void hugetlb_unregister_all_nodes(void)
1642 {
1643         int nid;
1644
1645         /*
1646          * disable node sysdev registrations.
1647          */
1648         register_hugetlbfs_with_node(NULL, NULL);
1649
1650         /*
1651          * remove hstate attributes from any nodes that have them.
1652          */
1653         for (nid = 0; nid < nr_node_ids; nid++)
1654                 hugetlb_unregister_node(&node_devices[nid]);
1655 }
1656
1657 /*
1658  * Register hstate attributes for a single node sysdev.
1659  * No-op if attributes already registered.
1660  */
1661 void hugetlb_register_node(struct node *node)
1662 {
1663         struct hstate *h;
1664         struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1665         int err;
1666
1667         if (nhs->hugepages_kobj)
1668                 return;         /* already allocated */
1669
1670         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1671                                                         &node->sysdev.kobj);
1672         if (!nhs->hugepages_kobj)
1673                 return;
1674
1675         for_each_hstate(h) {
1676                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1677                                                 nhs->hstate_kobjs,
1678                                                 &per_node_hstate_attr_group);
1679                 if (err) {
1680                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1681                                         " for node %d\n",
1682                                                 h->name, node->sysdev.id);
1683                         hugetlb_unregister_node(node);
1684                         break;
1685                 }
1686         }
1687 }
1688
1689 /*
1690  * hugetlb init time:  register hstate attributes for all registered node
1691  * sysdevs of nodes that have memory.  All on-line nodes should have
1692  * registered their associated sysdev by this time.
1693  */
1694 static void hugetlb_register_all_nodes(void)
1695 {
1696         int nid;
1697
1698         for_each_node_state(nid, N_HIGH_MEMORY) {
1699                 struct node *node = &node_devices[nid];
1700                 if (node->sysdev.id == nid)
1701                         hugetlb_register_node(node);
1702         }
1703
1704         /*
1705          * Let the node sysdev driver know we're here so it can
1706          * [un]register hstate attributes on node hotplug.
1707          */
1708         register_hugetlbfs_with_node(hugetlb_register_node,
1709                                      hugetlb_unregister_node);
1710 }
1711 #else   /* !CONFIG_NUMA */
1712
1713 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1714 {
1715         BUG();
1716         if (nidp)
1717                 *nidp = -1;
1718         return NULL;
1719 }
1720
1721 static void hugetlb_unregister_all_nodes(void) { }
1722
1723 static void hugetlb_register_all_nodes(void) { }
1724
1725 #endif
1726
1727 static void __exit hugetlb_exit(void)
1728 {
1729         struct hstate *h;
1730
1731         hugetlb_unregister_all_nodes();
1732
1733         for_each_hstate(h) {
1734                 kobject_put(hstate_kobjs[h - hstates]);
1735         }
1736
1737         kobject_put(hugepages_kobj);
1738 }
1739 module_exit(hugetlb_exit);
1740
1741 static int __init hugetlb_init(void)
1742 {
1743         /* Some platforms decide whether they support huge pages at boot
1744          * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1745          * there is no such support.
1746          */
1747         if (HPAGE_SHIFT == 0)
1748                 return 0;
1749
1750         if (!size_to_hstate(default_hstate_size)) {
1751                 default_hstate_size = HPAGE_SIZE;
1752                 if (!size_to_hstate(default_hstate_size))
1753                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1754         }
1755         default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1756         if (default_hstate_max_huge_pages)
1757                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1758
1759         hugetlb_init_hstates();
1760
1761         gather_bootmem_prealloc();
1762
1763         report_hugepages();
1764
1765         hugetlb_sysfs_init();
1766
1767         hugetlb_register_all_nodes();
1768
1769         return 0;
1770 }
1771 module_init(hugetlb_init);
1772
1773 /* Should be called when processing a hugepagesz=... option */
1774 void __init hugetlb_add_hstate(unsigned order)
1775 {
1776         struct hstate *h;
1777         unsigned long i;
1778
1779         if (size_to_hstate(PAGE_SIZE << order)) {
1780                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1781                 return;
1782         }
1783         BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1784         BUG_ON(order == 0);
1785         h = &hstates[max_hstate++];
1786         h->order = order;
1787         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1788         h->nr_huge_pages = 0;
1789         h->free_huge_pages = 0;
1790         for (i = 0; i < MAX_NUMNODES; ++i)
1791                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1792         h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1793         h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1794         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1795                                         huge_page_size(h)/1024);
1796
1797         parsed_hstate = h;
1798 }
1799
1800 static int __init hugetlb_nrpages_setup(char *s)
1801 {
1802         unsigned long *mhp;
1803         static unsigned long *last_mhp;
1804
1805         /*
1806          * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1807          * so this hugepages= parameter goes to the "default hstate".
1808          */
1809         if (!max_hstate)
1810                 mhp = &default_hstate_max_huge_pages;
1811         else
1812                 mhp = &parsed_hstate->max_huge_pages;
1813
1814         if (mhp == last_mhp) {
1815                 printk(KERN_WARNING "hugepages= specified twice without "
1816                         "interleaving hugepagesz=, ignoring\n");
1817                 return 1;
1818         }
1819
1820         if (sscanf(s, "%lu", mhp) <= 0)
1821                 *mhp = 0;
1822
1823         /*
1824          * Global state is always initialized later in hugetlb_init.
1825          * But we need to allocate >= MAX_ORDER hstates here early to still
1826          * use the bootmem allocator.
1827          */
1828         if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1829                 hugetlb_hstate_alloc_pages(parsed_hstate);
1830
1831         last_mhp = mhp;
1832
1833         return 1;
1834 }
1835 __setup("hugepages=", hugetlb_nrpages_setup);
1836
1837 static int __init hugetlb_default_setup(char *s)
1838 {
1839         default_hstate_size = memparse(s, &s);
1840         return 1;
1841 }
1842 __setup("default_hugepagesz=", hugetlb_default_setup);
1843
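/*
 * Sum the per-node counters in @array over the nodes permitted by the
 * current task's cpuset.
 */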
1844 static unsigned int cpuset_mems_nr(unsigned int *array)
1845 {
1846         int node;
1847         unsigned int nr = 0;
1848
1849         for_each_node_mask(node, cpuset_current_mems_allowed)
1850                 nr += array[node];
1851
1852         return nr;
1853 }
1854
1855 #ifdef CONFIG_SYSCTL
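/*
 * Common handler for the nr_hugepages sysctls (wired up elsewhere, typically
 * as vm.nr_hugepages and, with NUMA, vm.nr_hugepages_mempolicy).  Reads
 * report max_huge_pages; writes resize the persistent pool through
 * set_max_huge_pages(), constrained to the caller's mempolicy nodes when
 * obey_mempolicy is true and to all nodes with memory otherwise.
 *
 * Illustrative usage, assuming the usual sysctl registration:
 *
 *   # sysctl vm.nr_hugepages=128
 */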
1856 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1857                          struct ctl_table *table, int write,
1858                          void __user *buffer, size_t *length, loff_t *ppos)
1859 {
1860         struct hstate *h = &default_hstate;
1861         unsigned long tmp;
1862
1863         if (!write)
1864                 tmp = h->max_huge_pages;
1865
1866         table->data = &tmp;
1867         table->maxlen = sizeof(unsigned long);
1868         proc_doulongvec_minmax(table, write, buffer, length, ppos);
1869
1870         if (write) {
1871                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1872                                                 GFP_KERNEL | __GFP_NORETRY);
1873                 if (!(obey_mempolicy &&
1874                                init_nodemask_of_mempolicy(nodes_allowed))) {
1875                         NODEMASK_FREE(nodes_allowed);
1876                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1877                 }
1878                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1879
1880                 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1881                         NODEMASK_FREE(nodes_allowed);
1882         }
1883
1884         return 0;
1885 }
1886
1887 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1888                           void __user *buffer, size_t *length, loff_t *ppos)
1889 {
1890
1891         return hugetlb_sysctl_handler_common(false, table, write,
1892                                                         buffer, length, ppos);
1893 }
1894
1895 #ifdef CONFIG_NUMA
1896 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1897                           void __user *buffer, size_t *length, loff_t *ppos)
1898 {
1899         return hugetlb_sysctl_handler_common(true, table, write,
1900                                                         buffer, length, ppos);
1901 }
1902 #endif /* CONFIG_NUMA */
1903
1904 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1905                         void __user *buffer,
1906                         size_t *length, loff_t *ppos)
1907 {
1908         proc_dointvec(table, write, buffer, length, ppos);
1909         if (hugepages_treat_as_movable)
1910                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1911         else
1912                 htlb_alloc_mask = GFP_HIGHUSER;
1913         return 0;
1914 }
1915
1916 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1917                         void __user *buffer,
1918                         size_t *length, loff_t *ppos)
1919 {
1920         struct hstate *h = &default_hstate;
1921         unsigned long tmp;
1922
1923         if (!write)
1924                 tmp = h->nr_overcommit_huge_pages;
1925
1926         table->data = &tmp;
1927         table->maxlen = sizeof(unsigned long);
1928         proc_doulongvec_minmax(table, write, buffer, length, ppos);
1929
1930         if (write) {
1931                 spin_lock(&hugetlb_lock);
1932                 h->nr_overcommit_huge_pages = tmp;
1933                 spin_unlock(&hugetlb_lock);
1934         }
1935
1936         return 0;
1937 }
1938
1939 #endif /* CONFIG_SYSCTL */
1940
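/*
 * Report the default hstate's global counters; these back the HugePages_*
 * and Hugepagesize lines of /proc/meminfo.
 */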
1941 void hugetlb_report_meminfo(struct seq_file *m)
1942 {
1943         struct hstate *h = &default_hstate;
1944         seq_printf(m,
1945                         "HugePages_Total:   %5lu\n"
1946                         "HugePages_Free:    %5lu\n"
1947                         "HugePages_Rsvd:    %5lu\n"
1948                         "HugePages_Surp:    %5lu\n"
1949                         "Hugepagesize:   %8lu kB\n",
1950                         h->nr_huge_pages,
1951                         h->free_huge_pages,
1952                         h->resv_huge_pages,
1953                         h->surplus_huge_pages,
1954                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1955 }
1956
1957 int hugetlb_report_node_meminfo(int nid, char *buf)
1958 {
1959         struct hstate *h = &default_hstate;
1960         return sprintf(buf,
1961                 "Node %d HugePages_Total: %5u\n"
1962                 "Node %d HugePages_Free:  %5u\n"
1963                 "Node %d HugePages_Surp:  %5u\n",
1964                 nid, h->nr_huge_pages_node[nid],
1965                 nid, h->free_huge_pages_node[nid],
1966                 nid, h->surplus_huge_pages_node[nid]);
1967 }
1968
1969 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1970 unsigned long hugetlb_total_pages(void)
1971 {
1972         struct hstate *h = &default_hstate;
1973         return h->nr_huge_pages * pages_per_huge_page(h);
1974 }
1975
1976 static int hugetlb_acct_memory(struct hstate *h, long delta)
1977 {
1978         int ret = -ENOMEM;
1979
1980         spin_lock(&hugetlb_lock);
1981         /*
1982          * When cpuset is configured, it breaks the strict hugetlb page
1983          * reservation as the accounting is done on a global variable. Such
1984          * a reservation is effectively meaningless in the presence of
1985          * cpusets because it is not checked against page availability for
1986          * the current cpuset. An application can still be OOM-killed by
1987          * the kernel if there are no free huge pages left in the cpuset
1988          * that the task runs in. Enforcing strict accounting with cpusets
1989          * is nearly impossible (or too ugly) because cpusets are fluid:
1990          * tasks and memory nodes can be moved between cpusets at any time.
1991          *
1992          * Changing the semantics of shared hugetlb mappings under cpusets
1993          * is undesirable. However, in order to preserve some of the
1994          * semantics, we fall back to checking the current free page count
1995          * as a best-effort attempt, hopefully minimizing the impact of the
1996          * semantic change that cpusets introduce.
1997          */
1998         if (delta > 0) {
1999                 if (gather_surplus_pages(h, delta) < 0)
2000                         goto out;
2001
2002                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2003                         return_unused_surplus_pages(h, delta);
2004                         goto out;
2005                 }
2006         }
2007
2008         ret = 0;
2009         if (delta < 0)
2010                 return_unused_surplus_pages(h, (unsigned long) -delta);
2011
2012 out:
2013         spin_unlock(&hugetlb_lock);
2014         return ret;
2015 }
2016
2017 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2018 {
2019         struct resv_map *reservations = vma_resv_map(vma);
2020
2021         /*
2022          * This new VMA should share its sibling's reservation map if present.
2023          * The VMA will only ever have a valid reservation map pointer where
2024          * it is being copied for another still existing VMA.  As that VMA
2025          * has a reference to the reservation map it cannot disappear until
2026          * after this open call completes.  It is therefore safe to take a
2027          * new reference here without additional locking.
2028          */
2029         if (reservations)
2030                 kref_get(&reservations->refs);
2031 }
2032
2033 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2034 {
2035         struct hstate *h = hstate_vma(vma);
2036         struct resv_map *reservations = vma_resv_map(vma);
2037         unsigned long reserve;
2038         unsigned long start;
2039         unsigned long end;
2040
2041         if (reservations) {
2042                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2043                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2044
2045                 reserve = (end - start) -
2046                         region_count(&reservations->regions, start, end);
2047
2048                 kref_put(&reservations->refs, resv_map_release);
2049
2050                 if (reserve) {
2051                         hugetlb_acct_memory(h, -reserve);
2052                         hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2053                 }
2054         }
2055 }
2056
2057 /*
2058  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2059  * handle_mm_fault() to try to instantiate regular-sized pages in the
2060  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2061  * this far.
2062  */
2063 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2064 {
2065         BUG();
2066         return 0;
2067 }
2068
2069 const struct vm_operations_struct hugetlb_vm_ops = {
2070         .fault = hugetlb_vm_op_fault,
2071         .open = hugetlb_vm_op_open,
2072         .close = hugetlb_vm_op_close,
2073 };
2074
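/*
 * Build a huge PTE for @page using the VMA's protection bits: writable and
 * dirty when @writable is set, write-protected otherwise, and always young
 * and huge.
 */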
2075 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2076                                 int writable)
2077 {
2078         pte_t entry;
2079
2080         if (writable) {
2081                 entry =
2082                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2083         } else {
2084                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2085         }
2086         entry = pte_mkyoung(entry);
2087         entry = pte_mkhuge(entry);
2088
2089         return entry;
2090 }
2091
2092 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2093                                    unsigned long address, pte_t *ptep)
2094 {
2095         pte_t entry;
2096
2097         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2098         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
2099                 update_mmu_cache(vma, address, ptep);
2100         }
2101 }
2102
2103
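/*
 * Copy @vma's huge page table entries from @src to @dst at fork time.
 * Shared page tables are skipped; for private (copy-on-write) mappings the
 * source entry is write-protected before being copied, and each mapped page
 * gains a page reference and an rmap reference for the child.
 */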
2104 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2105                             struct vm_area_struct *vma)
2106 {
2107         pte_t *src_pte, *dst_pte, entry;
2108         struct page *ptepage;
2109         unsigned long addr;
2110         int cow;
2111         struct hstate *h = hstate_vma(vma);
2112         unsigned long sz = huge_page_size(h);
2113
2114         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2115
2116         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2117                 src_pte = huge_pte_offset(src, addr);
2118                 if (!src_pte)
2119                         continue;
2120                 dst_pte = huge_pte_alloc(dst, addr, sz);
2121                 if (!dst_pte)
2122                         goto nomem;
2123
2124                 /* If the pagetables are shared don't copy or take references */
2125                 if (dst_pte == src_pte)
2126                         continue;
2127
2128                 spin_lock(&dst->page_table_lock);
2129                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2130                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2131                         if (cow)
2132                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2133                         entry = huge_ptep_get(src_pte);
2134                         ptepage = pte_page(entry);
2135                         get_page(ptepage);
2136                         page_dup_rmap(ptepage);
2137                         set_huge_pte_at(dst, addr, dst_pte, entry);
2138                 }
2139                 spin_unlock(&src->page_table_lock);
2140                 spin_unlock(&dst->page_table_lock);
2141         }
2142         return 0;
2143
2144 nomem:
2145         return -ENOMEM;
2146 }
2147
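/*
 * Return 1 if @pte is a non-present huge PTE encoding a migration swap
 * entry, 0 otherwise.
 */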
2148 static int is_hugetlb_entry_migration(pte_t pte)
2149 {
2150         swp_entry_t swp;
2151
2152         if (huge_pte_none(pte) || pte_present(pte))
2153                 return 0;
2154         swp = pte_to_swp_entry(pte);
2155         if (non_swap_entry(swp) && is_migration_entry(swp)) {
2156                 return 1;
2157         } else
2158                 return 0;
2159 }
2160
2161 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2162 {
2163         swp_entry_t swp;
2164
2165         if (huge_pte_none(pte) || pte_present(pte))
2166                 return 0;
2167         swp = pte_to_swp_entry(pte);
2168         if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
2169                 return 1;
2170         } else
2171                 return 0;
2172 }
2173
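/*
 * Tear down the huge PTEs of @vma in [start, end).  Pages are gathered on a
 * local list under mm->page_table_lock and only removed from the rmap and
 * released once the TLB has been flushed.  If @ref_page is non-NULL, only
 * that page is unmapped and the VMA is flagged HPAGE_RESV_UNMAPPED.
 */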
2174 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2175                             unsigned long end, struct page *ref_page)
2176 {
2177         struct mm_struct *mm = vma->vm_mm;
2178         unsigned long address;
2179         pte_t *ptep;
2180         pte_t pte;
2181         struct page *page;
2182         struct page *tmp;
2183         struct hstate *h = hstate_vma(vma);
2184         unsigned long sz = huge_page_size(h);
2185
2186         /*
2187          * A page gathering list, protected by per file i_mmap_lock. The
2188          * lock is used to avoid list corruption from multiple unmapping
2189          * of the same page since we are using page->lru.
2190          */
2191         LIST_HEAD(page_list);
2192
2193         WARN_ON(!is_vm_hugetlb_page(vma));
2194         BUG_ON(start & ~huge_page_mask(h));
2195         BUG_ON(end & ~huge_page_mask(h));
2196
2197         mmu_notifier_invalidate_range_start(mm, start, end);
2198         spin_lock(&mm->page_table_lock);
2199         for (address = start; address < end; address += sz) {
2200                 ptep = huge_pte_offset(mm, address);
2201                 if (!ptep)
2202                         continue;
2203
2204                 if (huge_pmd_unshare(mm, &address, ptep))
2205                         continue;
2206
2207                 /*
2208                  * If a reference page is supplied, it is because a specific
2209                  * page is being unmapped, not a range. Ensure the page we
2210                  * are about to unmap is the actual page of interest.
2211                  */
2212                 if (ref_page) {
2213                         pte = huge_ptep_get(ptep);
2214                         if (huge_pte_none(pte))
2215                                 continue;
2216                         page = pte_page(pte);
2217                         if (page != ref_page)
2218                                 continue;
2219
2220                         /*
2221                          * Mark the VMA as having unmapped its page so that
2222                          * future faults in this VMA will fail rather than
2223                          * looking like data was lost
2224                          */
2225                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2226                 }
2227
2228                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2229                 if (huge_pte_none(pte))
2230                         continue;
2231
2232                 /*
2233                  * An HWPoisoned hugepage has already been unmapped and its reference dropped
2234                  */
2235                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2236                         continue;
2237
2238                 page = pte_page(pte);
2239                 if (pte_dirty(pte))
2240                         set_page_dirty(page);
2241                 list_add(&page->lru, &page_list);
2242         }
2243         spin_unlock(&mm->page_table_lock);
2244         flush_tlb_range(vma, start, end);
2245         mmu_notifier_invalidate_range_end(mm, start, end);
2246         list_for_each_entry_safe(page, tmp, &page_list, lru) {
2247                 page_remove_rmap(page);
2248                 list_del(&page->lru);
2249                 put_page(page);
2250         }
2251 }
2252
2253 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2254                           unsigned long end, struct page *ref_page)
2255 {
2256         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2257         __unmap_hugepage_range(vma, start, end, ref_page);
2258         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2259 }
2260
2261 /*
2262  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2263  * mapping it owns the reserve page for. The intention is to unmap the page
2264  * from other VMAs and let the children be SIGKILLed if they are faulting the
2265  * same region.
2266  */
2267 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2268                                 struct page *page, unsigned long address)
2269 {
2270         struct hstate *h = hstate_vma(vma);
2271         struct vm_area_struct *iter_vma;
2272         struct address_space *mapping;
2273         struct prio_tree_iter iter;
2274         pgoff_t pgoff;
2275
2276         /*
2277          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2278          * from page cache lookup which is in HPAGE_SIZE units.
2279          */
2280         address = address & huge_page_mask(h);
2281         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2282                 + (vma->vm_pgoff >> PAGE_SHIFT);
2283         mapping = (struct address_space *)page_private(page);
2284
2285         /*
2286          * Take the mapping lock for the duration of the table walk. As
2287          * this mapping should be shared between all the VMAs,
2288          * __unmap_hugepage_range() is called as the lock is already held
2289          */
2290         spin_lock(&mapping->i_mmap_lock);
2291         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2292                 /* Do not unmap the current VMA */
2293                 if (iter_vma == vma)
2294                         continue;
2295
2296                 /*
2297                  * Unmap the page from other VMAs without their own reserves.
2298                  * They get marked to be SIGKILLed if they fault in these
2299                  * areas. This is because a future no-page fault on this VMA
2300                  * could insert a zeroed page instead of the data existing
2301                  * from the time of fork. This would look like data corruption
2302                  */
2303                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2304                         __unmap_hugepage_range(iter_vma,
2305                                 address, address + huge_page_size(h),
2306                                 page);
2307         }
2308         spin_unlock(&mapping->i_mmap_lock);
2309
2310         return 1;
2311 }
2312
2313 /*
2314  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2315  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2316 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2317                         unsigned long address, pte_t *ptep, pte_t pte,
2318                         struct page *pagecache_page)
2319 {
2320         struct hstate *h = hstate_vma(vma);
2321         struct page *old_page, *new_page;
2322         int avoidcopy;
2323         int outside_reserve = 0;
2324
2325         old_page = pte_page(pte);
2326
2327 retry_avoidcopy:
2328         /* If no-one else is actually using this page, avoid the copy
2329          * and just make the page writable */
2330         avoidcopy = (page_mapcount(old_page) == 1);
2331         if (avoidcopy) {
2332                 if (PageAnon(old_page))
2333                         page_move_anon_rmap(old_page, vma, address);
2334                 set_huge_ptep_writable(vma, address, ptep);
2335                 return 0;
2336         }
2337
2338         /*
2339          * If the process that created a MAP_PRIVATE mapping is about to
2340          * perform a COW due to a shared page count, attempt to satisfy
2341          * the allocation without using the existing reserves. The pagecache
2342          * page is used to determine if the reserve at this address was
2343          * consumed or not. If reserves were used, a partial faulted mapping
2344          * at the time of fork() could consume its reserves on COW instead
2345          * of the full address range.
2346          */
2347         if (!(vma->vm_flags & VM_MAYSHARE) &&
2348                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2349                         old_page != pagecache_page)
2350                 outside_reserve = 1;
2351
2352         page_cache_get(old_page);
2353
2354         /* Drop page_table_lock as buddy allocator may be called */
2355         spin_unlock(&mm->page_table_lock);
2356         new_page = alloc_huge_page(vma, address, outside_reserve);
2357
2358         if (IS_ERR(new_page)) {
2359                 page_cache_release(old_page);
2360
2361                 /*
2362                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2363                  * it is due to references held by a child and an insufficient
2364                  * huge page pool. To guarantee the original mapper's
2365                  * reliability, unmap the page from child processes. The child
2366                  * may get SIGKILLed if it later faults.
2367                  */
2368                 if (outside_reserve) {
2369                         BUG_ON(huge_pte_none(pte));
2370                         if (unmap_ref_private(mm, vma, old_page, address)) {
2371                                 BUG_ON(page_count(old_page) != 1);
2372                                 BUG_ON(huge_pte_none(pte));
2373                                 spin_lock(&mm->page_table_lock);
2374                                 goto retry_avoidcopy;
2375                         }
2376                         WARN_ON_ONCE(1);
2377                 }
2378
2379                 /* Caller expects lock to be held */
2380                 spin_lock(&mm->page_table_lock);
2381                 return -PTR_ERR(new_page);
2382         }
2383
2384         /*
2385          * When the original hugepage is a shared one, it does not have
2386          * an anon_vma prepared.
2387          */
2388         if (unlikely(anon_vma_prepare(vma))) {
2389                 /* Caller expects lock to be held */
2390                 spin_lock(&mm->page_table_lock);
2391                 return VM_FAULT_OOM;
2392         }
2393
2394         copy_user_huge_page(new_page, old_page, address, vma,
2395                             pages_per_huge_page(h));
2396         __SetPageUptodate(new_page);
2397
2398         /*
2399          * Retake the page_table_lock to check for racing updates
2400          * before the page tables are altered
2401          */
2402         spin_lock(&mm->page_table_lock);
2403         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2404         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2405                 /* Break COW */
2406                 mmu_notifier_invalidate_range_start(mm,
2407                         address & huge_page_mask(h),
2408                         (address & huge_page_mask(h)) + huge_page_size(h));
2409                 huge_ptep_clear_flush(vma, address, ptep);
2410                 set_huge_pte_at(mm, address, ptep,
2411                                 make_huge_pte(vma, new_page, 1));
2412                 page_remove_rmap(old_page);
2413                 hugepage_add_new_anon_rmap(new_page, vma, address);
2414                 /* Make the old page be freed below */
2415                 new_page = old_page;
2416                 mmu_notifier_invalidate_range_end(mm,
2417                         address & huge_page_mask(h),
2418                         (address & huge_page_mask(h)) + huge_page_size(h));
2419         }
2420         page_cache_release(new_page);
2421         page_cache_release(old_page);
2422         return 0;
2423 }
2424
2425 /* Return the pagecache page at a given address within a VMA */
2426 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2427                         struct vm_area_struct *vma, unsigned long address)
2428 {
2429         struct address_space *mapping;
2430         pgoff_t idx;
2431
2432         mapping = vma->vm_file->f_mapping;
2433         idx = vma_hugecache_offset(h, vma, address);
2434
2435         return find_lock_page(mapping, idx);
2436 }
2437
2438 /*
2439  * Return whether there is a pagecache page to back given address within VMA.
2440  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2441  */
2442 static bool hugetlbfs_pagecache_present(struct hstate *h,
2443                         struct vm_area_struct *vma, unsigned long address)
2444 {
2445         struct address_space *mapping;
2446         pgoff_t idx;
2447         struct page *page;
2448
2449         mapping = vma->vm_file->f_mapping;
2450         idx = vma_hugecache_offset(h, vma, address);
2451
2452         page = find_get_page(mapping, idx);
2453         if (page)
2454                 put_page(page);
2455         return page != NULL;
2456 }
2457
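/*
 * Handle a fault on a huge PTE that is not present: find or allocate the
 * backing page (adding it to the page cache for shared mappings, or to the
 * anon rmap for private ones) and install the new PTE under
 * mm->page_table_lock.
 */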
2458 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2459                         unsigned long address, pte_t *ptep, unsigned int flags)
2460 {
2461         struct hstate *h = hstate_vma(vma);
2462         int ret = VM_FAULT_SIGBUS;
2463         pgoff_t idx;
2464         unsigned long size;
2465         struct page *page;
2466         struct address_space *mapping;
2467         pte_t new_pte;
2468
2469         /*
2470          * Currently, we are forced to kill the process in the event the
2471          * original mapper has unmapped pages from the child due to a failed
2472          * COW. Warn that such a situation has occurred as it may not be obvious
2473          */
2474         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2475                 printk(KERN_WARNING
2476                         "PID %d killed due to inadequate hugepage pool\n",
2477                         current->pid);
2478                 return ret;
2479         }
2480
2481         mapping = vma->vm_file->f_mapping;
2482         idx = vma_hugecache_offset(h, vma, address);
2483
2484         /*
2485          * Use page lock to guard against racing truncation
2486          * before we get page_table_lock.
2487          */
2488 retry:
2489         page = find_lock_page(mapping, idx);
2490         if (!page) {
2491                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2492                 if (idx >= size)
2493                         goto out;
2494                 page = alloc_huge_page(vma, address, 0);
2495                 if (IS_ERR(page)) {
2496                         ret = -PTR_ERR(page);
2497                         goto out;
2498                 }
2499                 clear_huge_page(page, address, pages_per_huge_page(h));
2500                 __SetPageUptodate(page);
2501
2502                 if (vma->vm_flags & VM_MAYSHARE) {
2503                         int err;
2504                         struct inode *inode = mapping->host;
2505
2506                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2507                         if (err) {
2508                                 put_page(page);
2509                                 if (err == -EEXIST)
2510                                         goto retry;
2511                                 goto out;
2512                         }
2513
2514                         spin_lock(&inode->i_lock);
2515                         inode->i_blocks += blocks_per_huge_page(h);
2516                         spin_unlock(&inode->i_lock);
2517                         page_dup_rmap(page);
2518                 } else {
2519                         lock_page(page);
2520                         if (unlikely(anon_vma_prepare(vma))) {
2521                                 ret = VM_FAULT_OOM;
2522                                 goto backout_unlocked;
2523                         }
2524                         hugepage_add_new_anon_rmap(page, vma, address);
2525                 }
2526         } else {
2527                 /*
2528                  * If a memory error occurs between mmap() and the fault, some
2529                  * processes may not have a hwpoisoned swap entry for the errored
2530                  * address, so block the hugepage fault with a PG_hwpoison check.
2531                  */
2532                 if (unlikely(PageHWPoison(page))) {
2533                         ret = VM_FAULT_HWPOISON |
2534                               VM_FAULT_SET_HINDEX(h - hstates);
2535                         goto backout_unlocked;
2536                 }
2537                 page_dup_rmap(page);
2538         }
2539
2540         /*
2541          * If we are going to COW a private mapping later, we examine the
2542          * pending reservations for this page now. This will ensure that
2543          * any allocations necessary to record that reservation occur outside
2544          * the spinlock.
2545          */
2546         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2547                 if (vma_needs_reservation(h, vma, address) < 0) {
2548                         ret = VM_FAULT_OOM;
2549                         goto backout_unlocked;
2550                 }
2551
2552         spin_lock(&mm->page_table_lock);
2553         size = i_size_read(mapping->host) >> huge_page_shift(h);
2554         if (idx >= size)
2555                 goto backout;
2556
2557         ret = 0;
2558         if (!huge_pte_none(huge_ptep_get(ptep)))
2559                 goto backout;
2560
2561         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2562                                 && (vma->vm_flags & VM_SHARED)));
2563         set_huge_pte_at(mm, address, ptep, new_pte);
2564
2565         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2566                 /* Optimization, do the COW without a second fault */
2567                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2568         }
2569
2570         spin_unlock(&mm->page_table_lock);
2571         unlock_page(page);
2572 out:
2573         return ret;
2574
2575 backout:
2576         spin_unlock(&mm->page_table_lock);
2577 backout_unlocked:
2578         unlock_page(page);
2579         put_page(page);
2580         goto out;
2581 }
2582
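/*
 * Top-level hugetlb fault handler.  Faults are serialized by the
 * hugetlb_instantiation_mutex; missing entries go through hugetlb_no_page()
 * and write faults on read-only entries through hugetlb_cow().
 */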
2583 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2584                         unsigned long address, unsigned int flags)
2585 {
2586         pte_t *ptep;
2587         pte_t entry;
2588         int ret;
2589         struct page *page = NULL;
2590         struct page *pagecache_page = NULL;
2591         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2592         struct hstate *h = hstate_vma(vma);
2593
2594         ptep = huge_pte_offset(mm, address);
2595         if (ptep) {
2596                 entry = huge_ptep_get(ptep);
2597                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2598                         migration_entry_wait(mm, (pmd_t *)ptep, address);
2599                         return 0;
2600                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2601                         return VM_FAULT_HWPOISON_LARGE |
2602                                VM_FAULT_SET_HINDEX(h - hstates);
2603         }
2604
2605         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2606         if (!ptep)
2607                 return VM_FAULT_OOM;
2608
2609         /*
2610          * Serialize hugepage allocation and instantiation, so that we don't
2611          * get spurious allocation failures if two CPUs race to instantiate
2612          * the same page in the page cache.
2613          */
2614         mutex_lock(&hugetlb_instantiation_mutex);
2615         entry = huge_ptep_get(ptep);
2616         if (huge_pte_none(entry)) {
2617                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2618                 goto out_mutex;
2619         }
2620
2621         ret = 0;
2622
2623         /*
2624          * If we are going to COW the mapping later, we examine the pending
2625          * reservations for this page now. This will ensure that any
2626          * allocations necessary to record that reservation occur outside the
2627          * spinlock. For private mappings, we also lookup the pagecache
2628          * page now as it is used to determine if a reservation has been
2629          * consumed.
2630          */
2631         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2632                 if (vma_needs_reservation(h, vma, address) < 0) {
2633                         ret = VM_FAULT_OOM;
2634                         goto out_mutex;
2635                 }
2636
2637                 if (!(vma->vm_flags & VM_MAYSHARE))
2638                         pagecache_page = hugetlbfs_pagecache_page(h,
2639                                                                 vma, address);
2640         }
2641
2642         /*
2643          * hugetlb_cow() requires page locks of pte_page(entry) and
2644          * pagecache_page, so here we need to take the former one
2645          * when page != pagecache_page or !pagecache_page.
2646          * Note that locking order is always pagecache_page -> page,
2647          * so no worry about deadlock.
2648          */
2649         page = pte_page(entry);
2650         if (page != pagecache_page)
2651                 lock_page(page);
2652
2653         spin_lock(&mm->page_table_lock);
2654         /* Check for a racing update before calling hugetlb_cow */
2655         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2656                 goto out_page_table_lock;
2657
2658
2659         if (flags & FAULT_FLAG_WRITE) {
2660                 if (!pte_write(entry)) {
2661                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2662                                                         pagecache_page);
2663                         goto out_page_table_lock;
2664                 }
2665                 entry = pte_mkdirty(entry);
2666         }
2667         entry = pte_mkyoung(entry);
2668         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2669                                                 flags & FAULT_FLAG_WRITE))
2670                 update_mmu_cache(vma, address, ptep);
2671
2672 out_page_table_lock:
2673         spin_unlock(&mm->page_table_lock);
2674
2675         if (pagecache_page) {
2676                 unlock_page(pagecache_page);
2677                 put_page(pagecache_page);
2678         }
2679         if (page != pagecache_page)
2680                 unlock_page(page);
2681
2682 out_mutex:
2683         mutex_unlock(&hugetlb_instantiation_mutex);
2684
2685         return ret;
2686 }
2687
2688 /* Can be overridden by architectures */
2689 __attribute__((weak)) struct page *
2690 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2691                pud_t *pud, int write)
2692 {
2693         BUG();
2694         return NULL;
2695 }
2696
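/*
 * Walk @vma's huge PTEs for the get_user_pages() path, filling @pages and
 * @vmas starting at index @i.  Absent or write-protected entries are faulted
 * in via hugetlb_fault(), except that FOLL_DUMP reports holes with no
 * backing pagecache as errors instead of allocating for them.
 */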
2697 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2698                         struct page **pages, struct vm_area_struct **vmas,
2699                         unsigned long *position, int *length, int i,
2700                         unsigned int flags)
2701 {
2702         unsigned long pfn_offset;
2703         unsigned long vaddr = *position;
2704         int remainder = *length;
2705         struct hstate *h = hstate_vma(vma);
2706
2707         spin_lock(&mm->page_table_lock);
2708         while (vaddr < vma->vm_end && remainder) {
2709                 pte_t *pte;
2710                 int absent;
2711                 struct page *page;
2712
2713                 /*
2714                  * Some archs (sparc64, sh*) have multiple pte_t entries
2715                  * per hugepage.  We have to make sure we get the
2716                  * first, for the page indexing below to work.
2717                  */
2718                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2719                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2720
2721                 /*
2722                  * When coredumping, it suits get_dump_page if we just return
2723                  * an error where there's an empty slot with no huge pagecache
2724                  * to back it.  This way, we avoid allocating a hugepage, and
2725                  * the sparse dumpfile avoids allocating disk blocks, but its
2726                  * huge holes still show up with zeroes where they need to be.
2727                  */
2728                 if (absent && (flags & FOLL_DUMP) &&
2729                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2730                         remainder = 0;
2731                         break;
2732                 }
2733
2734                 if (absent ||
2735                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2736                         int ret;
2737
2738                         spin_unlock(&mm->page_table_lock);
2739                         ret = hugetlb_fault(mm, vma, vaddr,
2740                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2741                         spin_lock(&mm->page_table_lock);
2742                         if (!(ret & VM_FAULT_ERROR))
2743                                 continue;
2744
2745                         remainder = 0;
2746                         break;
2747                 }
2748
2749                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2750                 page = pte_page(huge_ptep_get(pte));
2751 same_page:
2752                 if (pages) {
2753                         pages[i] = mem_map_offset(page, pfn_offset);
2754                         get_page(pages[i]);
2755                 }
2756
2757                 if (vmas)
2758                         vmas[i] = vma;
2759
2760                 vaddr += PAGE_SIZE;
2761                 ++pfn_offset;
2762                 --remainder;
2763                 ++i;
2764                 if (vaddr < vma->vm_end && remainder &&
2765                                 pfn_offset < pages_per_huge_page(h)) {
2766                         /*
2767                          * We use pfn_offset to avoid touching the pageframes
2768                          * of this compound page.
2769                          */
2770                         goto same_page;
2771                 }
2772         }
2773         spin_unlock(&mm->page_table_lock);
2774         *length = remainder;
2775         *position = vaddr;
2776
2777         return i ? i : -EFAULT;
2778 }
2779
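/*
 * mprotect() support: rewrite every present huge PTE in [address, end) with
 * @newprot, then flush the TLB for the range.
 */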
2780 void hugetlb_change_protection(struct vm_area_struct *vma,
2781                 unsigned long address, unsigned long end, pgprot_t newprot)
2782 {
2783         struct mm_struct *mm = vma->vm_mm;
2784         unsigned long start = address;
2785         pte_t *ptep;
2786         pte_t pte;
2787         struct hstate *h = hstate_vma(vma);
2788
2789         BUG_ON(address >= end);
2790         flush_cache_range(vma, address, end);
2791
2792         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2793         spin_lock(&mm->page_table_lock);
2794         for (; address < end; address += huge_page_size(h)) {
2795                 ptep = huge_pte_offset(mm, address);
2796                 if (!ptep)
2797                         continue;
2798                 if (huge_pmd_unshare(mm, &address, ptep))
2799                         continue;
2800                 if (!huge_pte_none(huge_ptep_get(ptep))) {
2801                         pte = huge_ptep_get_and_clear(mm, address, ptep);
2802                         pte = pte_mkhuge(pte_modify(pte, newprot));
2803                         set_huge_pte_at(mm, address, ptep, pte);
2804                 }
2805         }
2806         spin_unlock(&mm->page_table_lock);
2807         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2808
2809         flush_tlb_range(vma, start, end);
2810 }
2811
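/*
 * Reserve huge pages and filesystem quota for [from, to) when a mapping is
 * created.  Shared mappings record the reservation in the inode's region
 * list; private mappings get a per-VMA resv_map and reserve the whole range
 * up front.
 */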
2812 int hugetlb_reserve_pages(struct inode *inode,
2813                                         long from, long to,
2814                                         struct vm_area_struct *vma,
2815                                         int acctflag)
2816 {
2817         long ret, chg;
2818         struct hstate *h = hstate_inode(inode);
2819
2820         /*
2821          * Only apply hugepage reservation if asked. At fault time, an
2822          * attempt will be made for VM_NORESERVE to allocate a page
2823          * and filesystem quota without using reserves
2824          */
2825         if (acctflag & VM_NORESERVE)
2826                 return 0;
2827
2828         /*
2829          * Shared mappings base their reservation on the number of pages that
2830          * are already allocated on behalf of the file. Private mappings need
2831          * to reserve the full area even if read-only as mprotect() may be
2832          * called to make the mapping read-write. Assume !vma is a shm mapping
2833          */
2834         if (!vma || vma->vm_flags & VM_MAYSHARE)
2835                 chg = region_chg(&inode->i_mapping->private_list, from, to);
2836         else {
2837                 struct resv_map *resv_map = resv_map_alloc();
2838                 if (!resv_map)
2839                         return -ENOMEM;
2840
2841                 chg = to - from;
2842
2843                 set_vma_resv_map(vma, resv_map);
2844                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2845         }
2846
2847         if (chg < 0)
2848                 return chg;
2849
2850         /* There must be enough filesystem quota for the mapping */
2851         if (hugetlb_get_quota(inode->i_mapping, chg))
2852                 return -ENOSPC;
2853
2854         /*
2855          * Check enough hugepages are available for the reservation.
2856          * Hand back the quota if there are not
2857          */
2858         ret = hugetlb_acct_memory(h, chg);
2859         if (ret < 0) {
2860                 hugetlb_put_quota(inode->i_mapping, chg);
2861                 return ret;
2862         }
2863
2864         /*
2865          * Account for the reservations made. Shared mappings record regions
2866          * that have reservations as they are shared by multiple VMAs.
2867          * When the last VMA disappears, the region map says how much
2868          * the reservation was and the page cache tells how much of
2869          * the reservation was consumed. Private mappings are per-VMA and
2870          * only the consumed reservations are tracked. When the VMA
2871          * disappears, the original reservation is the VMA size and the
2872          * consumed reservations are stored in the map. Hence, nothing
2873          * else has to be done for private mappings here
2874          */
2875         if (!vma || vma->vm_flags & VM_MAYSHARE)
2876                 region_add(&inode->i_mapping->private_list, from, to);
2877         return 0;
2878 }
2879
2880 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2881 {
2882         struct hstate *h = hstate_inode(inode);
2883         long chg = region_truncate(&inode->i_mapping->private_list, offset);
2884
2885         spin_lock(&inode->i_lock);
2886         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2887         spin_unlock(&inode->i_lock);
2888
2889         hugetlb_put_quota(inode->i_mapping, (chg - freed));
2890         hugetlb_acct_memory(h, -(chg - freed));
2891 }
2892
2893 #ifdef CONFIG_MEMORY_FAILURE
2894
2895 /* Should be called with hugetlb_lock held */
2896 static int is_hugepage_on_freelist(struct page *hpage)
2897 {
2898         struct page *page;
2899         struct page *tmp;
2900         struct hstate *h = page_hstate(hpage);
2901         int nid = page_to_nid(hpage);
2902
2903         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2904                 if (page == hpage)
2905                         return 1;
2906         return 0;
2907 }
2908
2909 /*
2910  * This function is called from memory failure code.
2911  * Assume the caller holds page lock of the head page.
2912  */
2913 int dequeue_hwpoisoned_huge_page(struct page *hpage)
2914 {
2915         struct hstate *h = page_hstate(hpage);
2916         int nid = page_to_nid(hpage);
2917         int ret = -EBUSY;
2918
2919         spin_lock(&hugetlb_lock);
2920         if (is_hugepage_on_freelist(hpage)) {
2921                 list_del(&hpage->lru);
2922                 set_page_refcounted(hpage);
2923                 h->free_huge_pages--;
2924                 h->free_huge_pages_node[nid]--;
2925                 ret = 0;
2926         }
2927         spin_unlock(&hugetlb_lock);
2928         return ret;
2929 }
2930 #endif