1 /*
2  *  linux/mm/nommu.c
3  *
4  *  Replacement code for mm functions to support CPUs that don't
5  *  have any form of memory management unit (thus no virtual memory).
6  *
7  *  See Documentation/nommu-mmap.txt
8  *
9  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
10  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
11  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
12  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
13  *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
14  */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #define __DISABLE_GUP_DEPRECATED
19
20 #include <linux/export.h>
21 #include <linux/mm.h>
22 #include <linux/vmacache.h>
23 #include <linux/mman.h>
24 #include <linux/swap.h>
25 #include <linux/file.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h>
28 #include <linux/slab.h>
29 #include <linux/vmalloc.h>
30 #include <linux/blkdev.h>
31 #include <linux/backing-dev.h>
32 #include <linux/compiler.h>
33 #include <linux/mount.h>
34 #include <linux/personality.h>
35 #include <linux/security.h>
36 #include <linux/syscalls.h>
37 #include <linux/audit.h>
38 #include <linux/sched/sysctl.h>
39 #include <linux/printk.h>
40
41 #include <asm/uaccess.h>
42 #include <asm/tlb.h>
43 #include <asm/tlbflush.h>
44 #include <asm/mmu_context.h>
45 #include "internal.h"
46
47 void *high_memory;
48 EXPORT_SYMBOL(high_memory);
49 struct page *mem_map;
50 unsigned long max_mapnr;
51 EXPORT_SYMBOL(max_mapnr);
52 unsigned long highest_memmap_pfn;
53 struct percpu_counter vm_committed_as;
54 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
55 int sysctl_overcommit_ratio = 50; /* default is 50% */
56 unsigned long sysctl_overcommit_kbytes __read_mostly;
57 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
58 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
59 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
60 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
61 int heap_stack_gap = 0;
62
63 atomic_long_t mmap_pages_allocated;
64
65 /*
66  * The global memory commitment made in the system can be a metric
67  * that can be used to drive ballooning decisions when Linux is hosted
68  * as a guest. On Hyper-V, the host implements a policy engine for dynamically
69  * balancing memory across competing virtual machines that are hosted.
70  * Several metrics drive this policy engine including the guest reported
71  * memory commitment.
72  */
73 unsigned long vm_memory_committed(void)
74 {
75         return percpu_counter_read_positive(&vm_committed_as);
76 }
77
78 EXPORT_SYMBOL_GPL(vm_memory_committed);
79
80 EXPORT_SYMBOL(mem_map);
81
82 /* list of mapped, potentially shareable regions */
83 static struct kmem_cache *vm_region_jar;
84 struct rb_root nommu_region_tree = RB_ROOT;
85 DECLARE_RWSEM(nommu_region_sem);
86
87 const struct vm_operations_struct generic_file_vm_ops = {
88 };
89
90 /*
91  * Return the total memory allocated for this pointer, not
92  * just what the caller asked for.
93  *
94  * Doesn't have to be accurate, i.e. may have races.
95  */
96 unsigned int kobjsize(const void *objp)
97 {
98         struct page *page;
99
100         /*
101          * If the object should not have ksize() performed on it,
102          * return a size of 0
103          */
104         if (!objp || !virt_addr_valid(objp))
105                 return 0;
106
107         page = virt_to_head_page(objp);
108
109         /*
110          * If the allocator sets PageSlab, we know the pointer came from
111          * kmalloc().
112          */
113         if (PageSlab(page))
114                 return ksize(objp);
115
116         /*
117          * If it's not a compound page, see if we have a matching VMA
118          * region. This test is intentionally done in reverse order,
119          * so if there's no VMA, we still fall through and hand back
120          * PAGE_SIZE for 0-order pages.
121          */
122         if (!PageCompound(page)) {
123                 struct vm_area_struct *vma;
124
125                 vma = find_vma(current->mm, (unsigned long)objp);
126                 if (vma)
127                         return vma->vm_end - vma->vm_start;
128         }
129
130         /*
131          * The ksize() function is only guaranteed to work for pointers
132          * returned by kmalloc(). So handle arbitrary pointers here.
133          */
134         return PAGE_SIZE << compound_order(page);
135 }
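
/*
 * Illustrative example (not part of the original source): kobjsize()
 * reports the storage actually backing a pointer, which is usually
 * larger than what was asked for.  A hypothetical caller might see:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *
 *	if (p)
 *		pr_info("asked for 100, backed by %u bytes\n", kobjsize(p));
 *	kfree(p);
 */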
136
137 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
138                       unsigned long start, unsigned long nr_pages,
139                       unsigned int foll_flags, struct page **pages,
140                       struct vm_area_struct **vmas, int *nonblocking)
141 {
142         struct vm_area_struct *vma;
143         unsigned long vm_flags;
144         int i;
145
146         /* calculate required read or write permissions.
147          * If FOLL_FORCE is set, we only require the "MAY" flags.
148          */
149         vm_flags  = (foll_flags & FOLL_WRITE) ?
150                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
151         vm_flags &= (foll_flags & FOLL_FORCE) ?
152                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
153
154         for (i = 0; i < nr_pages; i++) {
155                 vma = find_vma(mm, start);
156                 if (!vma)
157                         goto finish_or_fault;
158
159                 /* protect what we can, including chardevs */
160                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
161                     !(vm_flags & vma->vm_flags))
162                         goto finish_or_fault;
163
164                 if (pages) {
165                         pages[i] = virt_to_page(start);
166                         if (pages[i])
167                                 page_cache_get(pages[i]);
168                 }
169                 if (vmas)
170                         vmas[i] = vma;
171                 start = (start + PAGE_SIZE) & PAGE_MASK;
172         }
173
174         return i;
175
176 finish_or_fault:
177         return i ? : -EFAULT;
178 }
179
180 /*
181  * get a list of pages in an address range belonging to the specified process
182  * and indicate the VMA that covers each page
183  * - this is potentially dodgy as we may end up incrementing the page count of a
184  *   slab page or a secondary page from a compound page
185  * - don't permit access to VMAs that don't support it, such as I/O mappings
186  */
187 long get_user_pages6(unsigned long start, unsigned long nr_pages,
188                     int write, int force, struct page **pages,
189                     struct vm_area_struct **vmas)
190 {
191         int flags = 0;
192
193         if (write)
194                 flags |= FOLL_WRITE;
195         if (force)
196                 flags |= FOLL_FORCE;
197
198         return __get_user_pages(current, current->mm, start, nr_pages, flags,
199                                 pages, vmas, NULL);
200 }
201 EXPORT_SYMBOL(get_user_pages6);
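
/*
 * Illustrative example (not part of the original source): a typical
 * caller pins a user buffer with mmap_sem held for read and drops the
 * page reference when finished, using the argument order of this
 * get_user_pages() variant (uaddr being the user address to pin):
 *
 *	struct page *pages[1];
 *	long n;
 *
 *	down_read(&current->mm->mmap_sem);
 *	n = get_user_pages(uaddr, 1, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (n == 1) {
 *		// ... access the data via page_address(pages[0]) ...
 *		put_page(pages[0]);
 *	}
 */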
202
203 long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
204                             int write, int force, struct page **pages,
205                             int *locked)
206 {
207         return get_user_pages6(start, nr_pages, write, force, pages, NULL);
208 }
209 EXPORT_SYMBOL(get_user_pages_locked6);
210
211 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
212                                unsigned long start, unsigned long nr_pages,
213                                int write, int force, struct page **pages,
214                                unsigned int gup_flags)
215 {
216         long ret;
217         down_read(&mm->mmap_sem);
218         ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
219                                 NULL, NULL);
220         up_read(&mm->mmap_sem);
221         return ret;
222 }
223 EXPORT_SYMBOL(__get_user_pages_unlocked);
224
225 long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
226                              int write, int force, struct page **pages)
227 {
228         return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
229                                          write, force, pages, 0);
230 }
231 EXPORT_SYMBOL(get_user_pages_unlocked5);
232
233 /**
234  * follow_pfn - look up PFN at a user virtual address
235  * @vma: memory mapping
236  * @address: user virtual address
237  * @pfn: location to store found PFN
238  *
239  * Only IO mappings and raw PFN mappings are allowed.
240  *
241  * Returns zero and the pfn at @pfn on success, -ve otherwise.
242  */
243 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
244         unsigned long *pfn)
245 {
246         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
247                 return -EINVAL;
248
249         *pfn = address >> PAGE_SHIFT;
250         return 0;
251 }
252 EXPORT_SYMBOL(follow_pfn);
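
/*
 * Illustrative example (not part of the original source): on nommu the
 * PFN is simply the virtual address shifted down, so for a VM_IO or
 * VM_PFNMAP vma a caller can do:
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, addr, &pfn))
 *		pr_debug("addr %lx is backed by pfn %lx\n", addr, pfn);
 */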
253
254 LIST_HEAD(vmap_area_list);
255
256 void vfree(const void *addr)
257 {
258         kfree(addr);
259 }
260 EXPORT_SYMBOL(vfree);
261
262 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
263 {
264         /*
265          *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
266          * returns only a logical address.
267          */
268         return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
269 }
270 EXPORT_SYMBOL(__vmalloc);
271
272 void *vmalloc_user(unsigned long size)
273 {
274         void *ret;
275
276         ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
277                         PAGE_KERNEL);
278         if (ret) {
279                 struct vm_area_struct *vma;
280
281                 down_write(&current->mm->mmap_sem);
282                 vma = find_vma(current->mm, (unsigned long)ret);
283                 if (vma)
284                         vma->vm_flags |= VM_USERMAP;
285                 up_write(&current->mm->mmap_sem);
286         }
287
288         return ret;
289 }
290 EXPORT_SYMBOL(vmalloc_user);
291
292 struct page *vmalloc_to_page(const void *addr)
293 {
294         return virt_to_page(addr);
295 }
296 EXPORT_SYMBOL(vmalloc_to_page);
297
298 unsigned long vmalloc_to_pfn(const void *addr)
299 {
300         return page_to_pfn(virt_to_page(addr));
301 }
302 EXPORT_SYMBOL(vmalloc_to_pfn);
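
/*
 * Illustrative note (not part of the original source): because nommu
 * vmalloc() is backed by kmalloc(), returned memory is physically
 * contiguous and the helpers above reduce to virt_to_page() and
 * page_to_pfn().  For example:
 *
 *	void *buf = vmalloc(8192);
 *
 *	if (buf) {
 *		struct page *pg = vmalloc_to_page(buf); // == virt_to_page(buf)
 *		...
 *		vfree(buf);                             // same as kfree(buf)
 *	}
 */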
303
304 long vread(char *buf, char *addr, unsigned long count)
305 {
306         /* Don't allow overflow */
307         if ((unsigned long) buf + count < count)
308                 count = -(unsigned long) buf;
309
310         memcpy(buf, addr, count);
311         return count;
312 }
313
314 long vwrite(char *buf, char *addr, unsigned long count)
315 {
316         /* Don't allow overflow */
317         if ((unsigned long) addr + count < count)
318                 count = -(unsigned long) addr;
319
320         memcpy(addr, buf, count);
321         return count;
322 }
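
/*
 * Illustrative note (not part of the original source): with no separate
 * vmalloc address space, vread() and vwrite() are just overflow-clamped
 * memcpy() calls, e.g. (kaddr being any valid kernel address):
 *
 *	char tmp[64];
 *	long n = vread(tmp, kaddr, sizeof(tmp));	// n == sizeof(tmp)
 */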
323
324 /*
325  *      vmalloc  -  allocate virtually contiguous memory
326  *
327  *      @size:          allocation size
328  *
329  *      Allocate enough pages to cover @size from the page level
330  *      allocator and map them into contiguous kernel virtual space.
331  *
332  *      For tight control over page level allocator and protection flags
333  *      use __vmalloc() instead.
334  */
335 void *vmalloc(unsigned long size)
336 {
337        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
338 }
339 EXPORT_SYMBOL(vmalloc);
340
341 /*
342  *      vzalloc - allocate virtually contiguous memory with zero fill
343  *
344  *      @size:          allocation size
345  *
346  *      Allocate enough pages to cover @size from the page level
347  *      allocator and map them into contiguous kernel virtual space.
348  *      The memory allocated is set to zero.
349  *
350  *      For tight control over page level allocator and protection flags
351  *      use __vmalloc() instead.
352  */
353 void *vzalloc(unsigned long size)
354 {
355         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
356                         PAGE_KERNEL);
357 }
358 EXPORT_SYMBOL(vzalloc);
359
360 /**
361  * vmalloc_node - allocate memory on a specific node
362  * @size:       allocation size
363  * @node:       numa node
364  *
365  * Allocate enough pages to cover @size from the page level
366  * allocator and map them into contiguous kernel virtual space.
367  *
368  * For tight control over page level allocator and protection flags
369  * use __vmalloc() instead.
370  */
371 void *vmalloc_node(unsigned long size, int node)
372 {
373         return vmalloc(size);
374 }
375 EXPORT_SYMBOL(vmalloc_node);
376
377 /**
378  * vzalloc_node - allocate memory on a specific node with zero fill
379  * @size:       allocation size
380  * @node:       numa node
381  *
382  * Allocate enough pages to cover @size from the page level
383  * allocator and map them into contiguous kernel virtual space.
384  * The memory allocated is set to zero.
385  *
386  * For tight control over page level allocator and protection flags
387  * use __vmalloc() instead.
388  */
389 void *vzalloc_node(unsigned long size, int node)
390 {
391         return vzalloc(size);
392 }
393 EXPORT_SYMBOL(vzalloc_node);
394
395 #ifndef PAGE_KERNEL_EXEC
396 # define PAGE_KERNEL_EXEC PAGE_KERNEL
397 #endif
398
399 /**
400  *      vmalloc_exec  -  allocate virtually contiguous, executable memory
401  *      @size:          allocation size
402  *
403  *      Kernel-internal function to allocate enough pages to cover @size
404  *      from the page level allocator and map them into contiguous and
405  *      executable kernel virtual space.
406  *
407  *      For tight control over page level allocator and protection flags
408  *      use __vmalloc() instead.
409  */
410
411 void *vmalloc_exec(unsigned long size)
412 {
413         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
414 }
415
416 /**
417  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
418  *      @size:          allocation size
419  *
420  *      Allocate enough 32bit PA addressable pages to cover @size from the
421  *      page level allocator and map them into contiguous kernel virtual space.
422  */
423 void *vmalloc_32(unsigned long size)
424 {
425         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
426 }
427 EXPORT_SYMBOL(vmalloc_32);
428
429 /**
430  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
431  *      @size:          allocation size
432  *
433  * The resulting memory area is 32bit addressable and zeroed so it can be
434  * mapped to userspace without leaking data.
435  *
436  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
437  * remap_vmalloc_range() are permissible.
438  */
439 void *vmalloc_32_user(unsigned long size)
440 {
441         /*
442          * We'll have to sort out the ZONE_DMA bits for 64-bit,
443          * but for now this can simply use vmalloc_user() directly.
444          */
445         return vmalloc_user(size);
446 }
447 EXPORT_SYMBOL(vmalloc_32_user);
448
449 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
450 {
451         BUG();
452         return NULL;
453 }
454 EXPORT_SYMBOL(vmap);
455
456 void vunmap(const void *addr)
457 {
458         BUG();
459 }
460 EXPORT_SYMBOL(vunmap);
461
462 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
463 {
464         BUG();
465         return NULL;
466 }
467 EXPORT_SYMBOL(vm_map_ram);
468
469 void vm_unmap_ram(const void *mem, unsigned int count)
470 {
471         BUG();
472 }
473 EXPORT_SYMBOL(vm_unmap_ram);
474
475 void vm_unmap_aliases(void)
476 {
477 }
478 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
479
480 /*
481  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
482  * have one.
483  */
484 void __weak vmalloc_sync_all(void)
485 {
486 }
487
488 /**
489  *      alloc_vm_area - allocate a range of kernel address space
490  *      @size:          size of the area
491  *
492  *      Returns:        NULL on failure, vm_struct on success
493  *
494  *      This function reserves a range of kernel address space, and
495  *      allocates pagetables to map that range.  No actual mappings
496  *      are created.  If the kernel address space is not shared
497  *      between processes, it syncs the pagetable across all
498  *      processes.
499  */
500 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
501 {
502         BUG();
503         return NULL;
504 }
505 EXPORT_SYMBOL_GPL(alloc_vm_area);
506
507 void free_vm_area(struct vm_struct *area)
508 {
509         BUG();
510 }
511 EXPORT_SYMBOL_GPL(free_vm_area);
512
513 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
514                    struct page *page)
515 {
516         return -EINVAL;
517 }
518 EXPORT_SYMBOL(vm_insert_page);
519
520 /*
521  *  sys_brk() for the most part doesn't need the global kernel
522  *  lock, except when an application is doing something nasty
523  *  like trying to un-brk an area that has already been mapped
524  *  to a regular file.  In this case, the unmapping will need
525  *  to invoke file system routines that need the global lock.
526  */
527 SYSCALL_DEFINE1(brk, unsigned long, brk)
528 {
529         struct mm_struct *mm = current->mm;
530
531         if (brk < mm->start_brk || brk > mm->context.end_brk)
532                 return mm->brk;
533
534         if (mm->brk == brk)
535                 return mm->brk;
536
537         /*
538          * Always allow shrinking brk
539          */
540         if (brk <= mm->brk) {
541                 mm->brk = brk;
542                 return brk;
543         }
544
545         /*
546          * Ok, looks good - let it rip.
547          */
548         flush_icache_range(mm->brk, brk);
549         return mm->brk = brk;
550 }
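
/*
 * Illustrative note (not part of the original source): the brk area on
 * nommu is bounded by mm->context.end_brk, which is typically reserved
 * by the binary loader, so from userspace a grow request beyond that
 * limit simply fails:
 *
 *	if (sbrk(4096) == (void *)-1)
 *		;	// no room left in the preallocated brk area
 */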
551
552 /*
553  * initialise the VMA and region record slabs
554  */
555 void __init mmap_init(void)
556 {
557         int ret;
558
559         ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
560         VM_BUG_ON(ret);
561         vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
562 }
563
564 /*
565  * validate the region tree
566  * - the caller must hold the region lock
567  */
568 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
569 static noinline void validate_nommu_regions(void)
570 {
571         struct vm_region *region, *last;
572         struct rb_node *p, *lastp;
573
574         lastp = rb_first(&nommu_region_tree);
575         if (!lastp)
576                 return;
577
578         last = rb_entry(lastp, struct vm_region, vm_rb);
579         BUG_ON(last->vm_end <= last->vm_start);
580         BUG_ON(last->vm_top < last->vm_end);
581
582         while ((p = rb_next(lastp))) {
583                 region = rb_entry(p, struct vm_region, vm_rb);
584                 last = rb_entry(lastp, struct vm_region, vm_rb);
585
586                 BUG_ON(region->vm_end <= region->vm_start);
587                 BUG_ON(region->vm_top < region->vm_end);
588                 BUG_ON(region->vm_start < last->vm_top);
589
590                 lastp = p;
591         }
592 }
593 #else
594 static void validate_nommu_regions(void)
595 {
596 }
597 #endif
598
599 /*
600  * add a region into the global tree
601  */
602 static void add_nommu_region(struct vm_region *region)
603 {
604         struct vm_region *pregion;
605         struct rb_node **p, *parent;
606
607         validate_nommu_regions();
608
609         parent = NULL;
610         p = &nommu_region_tree.rb_node;
611         while (*p) {
612                 parent = *p;
613                 pregion = rb_entry(parent, struct vm_region, vm_rb);
614                 if (region->vm_start < pregion->vm_start)
615                         p = &(*p)->rb_left;
616                 else if (region->vm_start > pregion->vm_start)
617                         p = &(*p)->rb_right;
618                 else if (pregion == region)
619                         return;
620                 else
621                         BUG();
622         }
623
624         rb_link_node(&region->vm_rb, parent, p);
625         rb_insert_color(&region->vm_rb, &nommu_region_tree);
626
627         validate_nommu_regions();
628 }
629
630 /*
631  * delete a region from the global tree
632  */
633 static void delete_nommu_region(struct vm_region *region)
634 {
635         BUG_ON(!nommu_region_tree.rb_node);
636
637         validate_nommu_regions();
638         rb_erase(&region->vm_rb, &nommu_region_tree);
639         validate_nommu_regions();
640 }
641
642 /*
643  * free a contiguous series of pages
644  */
645 static void free_page_series(unsigned long from, unsigned long to)
646 {
647         for (; from < to; from += PAGE_SIZE) {
648                 struct page *page = virt_to_page(from);
649
650                 atomic_long_dec(&mmap_pages_allocated);
651                 put_page(page);
652         }
653 }
654
655 /*
656  * release a reference to a region
657  * - the caller must hold the region semaphore for writing, which this releases
658  * - the region may not have been added to the tree yet, in which case vm_top
659  *   will equal vm_start
660  */
661 static void __put_nommu_region(struct vm_region *region)
662         __releases(nommu_region_sem)
663 {
664         BUG_ON(!nommu_region_tree.rb_node);
665
666         if (--region->vm_usage == 0) {
667                 if (region->vm_top > region->vm_start)
668                         delete_nommu_region(region);
669                 up_write(&nommu_region_sem);
670
671                 if (region->vm_file)
672                         fput(region->vm_file);
673
674                 /* IO memory and memory shared directly out of the pagecache
675                  * from ramfs/tmpfs mustn't be released here */
676                 if (region->vm_flags & VM_MAPPED_COPY)
677                         free_page_series(region->vm_start, region->vm_top);
678                 kmem_cache_free(vm_region_jar, region);
679         } else {
680                 up_write(&nommu_region_sem);
681         }
682 }
683
684 /*
685  * release a reference to a region
686  */
687 static void put_nommu_region(struct vm_region *region)
688 {
689         down_write(&nommu_region_sem);
690         __put_nommu_region(region);
691 }
692
693 /*
694  * update protection on a vma
695  */
696 static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
697 {
698 #ifdef CONFIG_MPU
699         struct mm_struct *mm = vma->vm_mm;
700         long start = vma->vm_start & PAGE_MASK;
701         while (start < vma->vm_end) {
702                 protect_page(mm, start, flags);
703                 start += PAGE_SIZE;
704         }
705         update_protections(mm);
706 #endif
707 }
708
709 /*
710  * add a VMA into a process's mm_struct in the appropriate place in the list
711  * and tree, and also add it to the address space's i_mmap interval tree if
712  * it is not an anonymous mapping
713  * - should be called with mm->mmap_sem held writelocked
714  */
715 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
716 {
717         struct vm_area_struct *pvma, *prev;
718         struct address_space *mapping;
719         struct rb_node **p, *parent, *rb_prev;
720
721         BUG_ON(!vma->vm_region);
722
723         mm->map_count++;
724         vma->vm_mm = mm;
725
726         protect_vma(vma, vma->vm_flags);
727
728         /* add the VMA to the mapping */
729         if (vma->vm_file) {
730                 mapping = vma->vm_file->f_mapping;
731
732                 i_mmap_lock_write(mapping);
733                 flush_dcache_mmap_lock(mapping);
734                 vma_interval_tree_insert(vma, &mapping->i_mmap);
735                 flush_dcache_mmap_unlock(mapping);
736                 i_mmap_unlock_write(mapping);
737         }
738
739         /* add the VMA to the tree */
740         parent = rb_prev = NULL;
741         p = &mm->mm_rb.rb_node;
742         while (*p) {
743                 parent = *p;
744                 pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
745
746                 /* sort by: start addr, end addr, VMA struct addr in that order
747                  * (the latter is necessary as we may get identical VMAs) */
748                 if (vma->vm_start < pvma->vm_start)
749                         p = &(*p)->rb_left;
750                 else if (vma->vm_start > pvma->vm_start) {
751                         rb_prev = parent;
752                         p = &(*p)->rb_right;
753                 } else if (vma->vm_end < pvma->vm_end)
754                         p = &(*p)->rb_left;
755                 else if (vma->vm_end > pvma->vm_end) {
756                         rb_prev = parent;
757                         p = &(*p)->rb_right;
758                 } else if (vma < pvma)
759                         p = &(*p)->rb_left;
760                 else if (vma > pvma) {
761                         rb_prev = parent;
762                         p = &(*p)->rb_right;
763                 } else
764                         BUG();
765         }
766
767         rb_link_node(&vma->vm_rb, parent, p);
768         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
769
770         /* add VMA to the VMA list also */
771         prev = NULL;
772         if (rb_prev)
773                 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
774
775         __vma_link_list(mm, vma, prev, parent);
776 }
777
778 /*
779  * delete a VMA from its owning mm_struct and address space
780  */
781 static void delete_vma_from_mm(struct vm_area_struct *vma)
782 {
783         int i;
784         struct address_space *mapping;
785         struct mm_struct *mm = vma->vm_mm;
786         struct task_struct *curr = current;
787
788         protect_vma(vma, 0);
789
790         mm->map_count--;
791         for (i = 0; i < VMACACHE_SIZE; i++) {
792                 /* if the vma is cached, invalidate the entire cache */
793                 if (curr->vmacache[i] == vma) {
794                         vmacache_invalidate(mm);
795                         break;
796                 }
797         }
798
799         /* remove the VMA from the mapping */
800         if (vma->vm_file) {
801                 mapping = vma->vm_file->f_mapping;
802
803                 i_mmap_lock_write(mapping);
804                 flush_dcache_mmap_lock(mapping);
805                 vma_interval_tree_remove(vma, &mapping->i_mmap);
806                 flush_dcache_mmap_unlock(mapping);
807                 i_mmap_unlock_write(mapping);
808         }
809
810         /* remove from the MM's tree and list */
811         rb_erase(&vma->vm_rb, &mm->mm_rb);
812
813         if (vma->vm_prev)
814                 vma->vm_prev->vm_next = vma->vm_next;
815         else
816                 mm->mmap = vma->vm_next;
817
818         if (vma->vm_next)
819                 vma->vm_next->vm_prev = vma->vm_prev;
820 }
821
822 /*
823  * destroy a VMA record
824  */
825 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
826 {
827         if (vma->vm_ops && vma->vm_ops->close)
828                 vma->vm_ops->close(vma);
829         if (vma->vm_file)
830                 fput(vma->vm_file);
831         put_nommu_region(vma->vm_region);
832         kmem_cache_free(vm_area_cachep, vma);
833 }
834
835 /*
836  * look up the first VMA in which addr resides, NULL if none
837  * - should be called with mm->mmap_sem at least held readlocked
838  */
839 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
840 {
841         struct vm_area_struct *vma;
842
843         /* check the cache first */
844         vma = vmacache_find(mm, addr);
845         if (likely(vma))
846                 return vma;
847
848         /* trawl the list (there may be multiple mappings in which addr
849          * resides) */
850         for (vma = mm->mmap; vma; vma = vma->vm_next) {
851                 if (vma->vm_start > addr)
852                         return NULL;
853                 if (vma->vm_end > addr) {
854                         vmacache_update(addr, vma);
855                         return vma;
856                 }
857         }
858
859         return NULL;
860 }
861 EXPORT_SYMBOL(find_vma);
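
/*
 * Illustrative example (not part of the original source): unlike the
 * MMU implementation, this find_vma() only returns a VMA that actually
 * contains @addr.  Callers hold mmap_sem at least for read:
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		;	// addr lies within [vma->vm_start, vma->vm_end)
 *	up_read(&mm->mmap_sem);
 */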
862
863 /*
864  * find a VMA
865  * - we don't extend stack VMAs under NOMMU conditions
866  */
867 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
868 {
869         return find_vma(mm, addr);
870 }
871
872 /*
873  * expand a stack to a given address
874  * - not supported under NOMMU conditions
875  */
876 int expand_stack(struct vm_area_struct *vma, unsigned long address)
877 {
878         return -ENOMEM;
879 }
880
881 /*
882  * look up the first VMA that exactly matches addr
883  * - should be called with mm->mmap_sem at least held readlocked
884  */
885 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
886                                              unsigned long addr,
887                                              unsigned long len)
888 {
889         struct vm_area_struct *vma;
890         unsigned long end = addr + len;
891
892         /* check the cache first */
893         vma = vmacache_find_exact(mm, addr, end);
894         if (vma)
895                 return vma;
896
897         /* trawl the list (there may be multiple mappings in which addr
898          * resides) */
899         for (vma = mm->mmap; vma; vma = vma->vm_next) {
900                 if (vma->vm_start < addr)
901                         continue;
902                 if (vma->vm_start > addr)
903                         return NULL;
904                 if (vma->vm_end == end) {
905                         vmacache_update(addr, vma);
906                         return vma;
907                 }
908         }
909
910         return NULL;
911 }
912
913 /*
914  * determine whether a mapping should be permitted and, if so, what sort of
915  * mapping we're capable of supporting
916  */
917 static int validate_mmap_request(struct file *file,
918                                  unsigned long addr,
919                                  unsigned long len,
920                                  unsigned long prot,
921                                  unsigned long flags,
922                                  unsigned long pgoff,
923                                  unsigned long *_capabilities)
924 {
925         unsigned long capabilities, rlen;
926         int ret;
927
928         /* do the simple checks first */
929         if (flags & MAP_FIXED)
930                 return -EINVAL;
931
932         if ((flags & MAP_TYPE) != MAP_PRIVATE &&
933             (flags & MAP_TYPE) != MAP_SHARED)
934                 return -EINVAL;
935
936         if (!len)
937                 return -EINVAL;
938
939         /* Careful about overflows.. */
940         rlen = PAGE_ALIGN(len);
941         if (!rlen || rlen > TASK_SIZE)
942                 return -ENOMEM;
943
944         /* offset overflow? */
945         if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
946                 return -EOVERFLOW;
947
948         if (file) {
949                 /* files must support mmap */
950                 if (!file->f_op->mmap)
951                         return -ENODEV;
952
953                 /* work out if what we've got could possibly be shared
954                  * - we support chardevs that provide their own "memory"
955                  * - we support files/blockdevs that are memory backed
956                  */
957                 if (file->f_op->mmap_capabilities) {
958                         capabilities = file->f_op->mmap_capabilities(file);
959                 } else {
960                         /* no explicit capabilities set, so assume some
961                          * defaults */
962                         switch (file_inode(file)->i_mode & S_IFMT) {
963                         case S_IFREG:
964                         case S_IFBLK:
965                                 capabilities = NOMMU_MAP_COPY;
966                                 break;
967
968                         case S_IFCHR:
969                                 capabilities =
970                                         NOMMU_MAP_DIRECT |
971                                         NOMMU_MAP_READ |
972                                         NOMMU_MAP_WRITE;
973                                 break;
974
975                         default:
976                                 return -EINVAL;
977                         }
978                 }
979
980                 /* eliminate any capabilities that we can't support on this
981                  * device */
982                 if (!file->f_op->get_unmapped_area)
983                         capabilities &= ~NOMMU_MAP_DIRECT;
984                 if (!(file->f_mode & FMODE_CAN_READ))
985                         capabilities &= ~NOMMU_MAP_COPY;
986
987                 /* The file shall have been opened with read permission. */
988                 if (!(file->f_mode & FMODE_READ))
989                         return -EACCES;
990
991                 if (flags & MAP_SHARED) {
992                         /* do checks for writing, appending and locking */
993                         if ((prot & PROT_WRITE) &&
994                             !(file->f_mode & FMODE_WRITE))
995                                 return -EACCES;
996
997                         if (IS_APPEND(file_inode(file)) &&
998                             (file->f_mode & FMODE_WRITE))
999                                 return -EACCES;
1000
1001                         if (locks_verify_locked(file))
1002                                 return -EAGAIN;
1003
1004                         if (!(capabilities & NOMMU_MAP_DIRECT))
1005                                 return -ENODEV;
1006
1007                         /* we mustn't privatise shared mappings */
1008                         capabilities &= ~NOMMU_MAP_COPY;
1009                 } else {
1010                         /* we're going to read the file into private memory we
1011                          * allocate */
1012                         if (!(capabilities & NOMMU_MAP_COPY))
1013                                 return -ENODEV;
1014
1015                         /* we don't permit a private writable mapping to be
1016                          * shared with the backing device */
1017                         if (prot & PROT_WRITE)
1018                                 capabilities &= ~NOMMU_MAP_DIRECT;
1019                 }
1020
1021                 if (capabilities & NOMMU_MAP_DIRECT) {
1022                         if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
1023                             ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
1024                             ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
1025                             ) {
1026                                 capabilities &= ~NOMMU_MAP_DIRECT;
1027                                 if (flags & MAP_SHARED) {
1028                                         pr_warn("MAP_SHARED not completely supported on !MMU\n");
1029                                         return -EINVAL;
1030                                 }
1031                         }
1032                 }
1033
1034                 /* handle executable mappings and implied executable
1035                  * mappings */
1036                 if (path_noexec(&file->f_path)) {
1037                         if (prot & PROT_EXEC)
1038                                 return -EPERM;
1039                 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1040                         /* handle implication of PROT_EXEC by PROT_READ */
1041                         if (current->personality & READ_IMPLIES_EXEC) {
1042                                 if (capabilities & NOMMU_MAP_EXEC)
1043                                         prot |= PROT_EXEC;
1044                         }
1045                 } else if ((prot & PROT_READ) &&
1046                          (prot & PROT_EXEC) &&
1047                          !(capabilities & NOMMU_MAP_EXEC)
1048                          ) {
1049                         /* backing file is not executable, try to copy */
1050                         capabilities &= ~NOMMU_MAP_DIRECT;
1051                 }
1052         } else {
1053                 /* anonymous mappings are always memory backed and can be
1054                  * privately mapped
1055                  */
1056                 capabilities = NOMMU_MAP_COPY;
1057
1058                 /* handle PROT_EXEC implication by PROT_READ */
1059                 if ((prot & PROT_READ) &&
1060                     (current->personality & READ_IMPLIES_EXEC))
1061                         prot |= PROT_EXEC;
1062         }
1063
1064         /* allow the security API to have its say */
1065         ret = security_mmap_addr(addr);
1066         if (ret < 0)
1067                 return ret;
1068
1069         /* looks okay */
1070         *_capabilities = capabilities;
1071         return 0;
1072 }
1073
1074 /*
1075  * we've determined that we can make the mapping, now translate what we
1076  * now know into VMA flags
1077  */
1078 static unsigned long determine_vm_flags(struct file *file,
1079                                         unsigned long prot,
1080                                         unsigned long flags,
1081                                         unsigned long capabilities)
1082 {
1083         unsigned long vm_flags;
1084
1085         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1086         /* vm_flags |= mm->def_flags; */
1087
1088         if (!(capabilities & NOMMU_MAP_DIRECT)) {
1089                 /* attempt to share read-only copies of mapped file chunks */
1090                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1091                 if (file && !(prot & PROT_WRITE))
1092                         vm_flags |= VM_MAYSHARE;
1093         } else {
1094                 /* overlay a shareable mapping on the backing device or inode
1095                  * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1096                  * romfs/cramfs */
1097                 vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
1098                 if (flags & MAP_SHARED)
1099                         vm_flags |= VM_SHARED;
1100         }
1101
1102         /* refuse to let anyone share private mappings with this process if
1103          * it's being traced - otherwise breakpoints set in it may interfere
1104          * with another untraced process
1105          */
1106         if ((flags & MAP_PRIVATE) && current->ptrace)
1107                 vm_flags &= ~VM_MAYSHARE;
1108
1109         return vm_flags;
1110 }
1111
1112 /*
1113  * set up a shared mapping on a file (the driver or filesystem provides and
1114  * pins the storage)
1115  */
1116 static int do_mmap_shared_file(struct vm_area_struct *vma)
1117 {
1118         int ret;
1119
1120         ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1121         if (ret == 0) {
1122                 vma->vm_region->vm_top = vma->vm_region->vm_end;
1123                 return 0;
1124         }
1125         if (ret != -ENOSYS)
1126                 return ret;
1127
1128         /* getting -ENOSYS indicates that direct mmap isn't possible (as
1129          * opposed to tried but failed) so we can only give a suitable error as
1130          * it's not possible to make a private copy if MAP_SHARED was given */
1131         return -ENODEV;
1132 }
1133
1134 /*
1135  * set up a private mapping or an anonymous shared mapping
1136  */
1137 static int do_mmap_private(struct vm_area_struct *vma,
1138                            struct vm_region *region,
1139                            unsigned long len,
1140                            unsigned long capabilities)
1141 {
1142         unsigned long total, point;
1143         void *base;
1144         int ret, order;
1145
1146         /* invoke the file's mapping function so that it can keep track of
1147          * shared mappings on devices or memory
1148          * - VM_MAYSHARE will be set if it may attempt to share
1149          */
1150         if (capabilities & NOMMU_MAP_DIRECT) {
1151                 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1152                 if (ret == 0) {
1153                         /* shouldn't return success if we're not sharing */
1154                         BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1155                         vma->vm_region->vm_top = vma->vm_region->vm_end;
1156                         return 0;
1157                 }
1158                 if (ret != -ENOSYS)
1159                         return ret;
1160
1161                 /* getting an ENOSYS error indicates that direct mmap isn't
1162                  * possible (as opposed to tried but failed) so we'll try to
1163                  * make a private copy of the data and map that instead */
1164         }
1165
1166
1167         /* allocate some memory to hold the mapping
1168          * - note that this may not return a page-aligned address if the object
1169          *   we're allocating is smaller than a page
1170          */
1171         order = get_order(len);
1172         total = 1 << order;
1173         point = len >> PAGE_SHIFT;
1174
1175         /* we don't want to allocate a power-of-2 sized page set */
1176         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
1177                 total = point;
1178
1179         base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1180         if (!base)
1181                 goto enomem;
1182
1183         atomic_long_add(total, &mmap_pages_allocated);
1184
1185         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1186         region->vm_start = (unsigned long) base;
1187         region->vm_end   = region->vm_start + len;
1188         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1189
1190         vma->vm_start = region->vm_start;
1191         vma->vm_end   = region->vm_start + len;
1192
1193         if (vma->vm_file) {
1194                 /* read the contents of a file into the copy */
1195                 mm_segment_t old_fs;
1196                 loff_t fpos;
1197
1198                 fpos = vma->vm_pgoff;
1199                 fpos <<= PAGE_SHIFT;
1200
1201                 old_fs = get_fs();
1202                 set_fs(KERNEL_DS);
1203                 ret = __vfs_read(vma->vm_file, base, len, &fpos);
1204                 set_fs(old_fs);
1205
1206                 if (ret < 0)
1207                         goto error_free;
1208
1209                 /* clear the last little bit */
1210                 if (ret < len)
1211                         memset(base + ret, 0, len - ret);
1212
1213         }
1214
1215         return 0;
1216
1217 error_free:
1218         free_page_series(region->vm_start, region->vm_top);
1219         region->vm_start = vma->vm_start = 0;
1220         region->vm_end   = vma->vm_end = 0;
1221         region->vm_top   = 0;
1222         return ret;
1223
1224 enomem:
1225         pr_err("Allocation of length %lu from process %d (%s) failed\n",
1226                len, current->pid, current->comm);
1227         show_free_areas(0);
1228         return -ENOMEM;
1229 }
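
/*
 * Illustrative worked example (not part of the original source) of the
 * trimming logic above, assuming 4KiB pages and a 20KiB (5 page)
 * request:
 *
 *	order = get_order(len)      = 3
 *	total = 1 << order          = 8 pages
 *	point = len >> PAGE_SHIFT   = 5 pages
 *
 * With sysctl_nr_trim_pages at its usual default of 1, total - point (3)
 * is >= 1, so total is trimmed to 5 and alloc_pages_exact() hands the
 * three excess pages back to the page allocator.
 */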
1230
1231 /*
1232  * handle mapping creation for uClinux
1233  */
1234 unsigned long do_mmap(struct file *file,
1235                         unsigned long addr,
1236                         unsigned long len,
1237                         unsigned long prot,
1238                         unsigned long flags,
1239                         vm_flags_t vm_flags,
1240                         unsigned long pgoff,
1241                         unsigned long *populate)
1242 {
1243         struct vm_area_struct *vma;
1244         struct vm_region *region;
1245         struct rb_node *rb;
1246         unsigned long capabilities, result;
1247         int ret;
1248
1249         *populate = 0;
1250
1251         /* decide whether we should attempt the mapping, and if so what sort of
1252          * mapping */
1253         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1254                                     &capabilities);
1255         if (ret < 0)
1256                 return ret;
1257
1258         /* we ignore the address hint */
1259         addr = 0;
1260         len = PAGE_ALIGN(len);
1261
1262         /* we've determined that we can make the mapping, now translate what we
1263          * now know into VMA flags */
1264         vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
1265
1266         /* we're going to need to record the mapping */
1267         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1268         if (!region)
1269                 goto error_getting_region;
1270
1271         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1272         if (!vma)
1273                 goto error_getting_vma;
1274
1275         region->vm_usage = 1;
1276         region->vm_flags = vm_flags;
1277         region->vm_pgoff = pgoff;
1278
1279         INIT_LIST_HEAD(&vma->anon_vma_chain);
1280         vma->vm_flags = vm_flags;
1281         vma->vm_pgoff = pgoff;
1282
1283         if (file) {
1284                 region->vm_file = get_file(file);
1285                 vma->vm_file = get_file(file);
1286         }
1287
1288         down_write(&nommu_region_sem);
1289
1290         /* if we want to share, we need to check for regions created by other
1291          * mmap() calls that overlap with our proposed mapping
1292          * - we can only share with a superset match on most regular files
1293          * - shared mappings on character devices and memory backed files are
1294  *   permitted to overlap inexactly as far as we are concerned, for in
1295  *   these cases sharing is handled in the driver or filesystem rather
1296          *   than here
1297          */
1298         if (vm_flags & VM_MAYSHARE) {
1299                 struct vm_region *pregion;
1300                 unsigned long pglen, rpglen, pgend, rpgend, start;
1301
1302                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1303                 pgend = pgoff + pglen;
1304
1305                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1306                         pregion = rb_entry(rb, struct vm_region, vm_rb);
1307
1308                         if (!(pregion->vm_flags & VM_MAYSHARE))
1309                                 continue;
1310
1311                         /* search for overlapping mappings on the same file */
1312                         if (file_inode(pregion->vm_file) !=
1313                             file_inode(file))
1314                                 continue;
1315
1316                         if (pregion->vm_pgoff >= pgend)
1317                                 continue;
1318
1319                         rpglen = pregion->vm_end - pregion->vm_start;
1320                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1321                         rpgend = pregion->vm_pgoff + rpglen;
1322                         if (pgoff >= rpgend)
1323                                 continue;
1324
1325                         /* handle inexactly overlapping matches between
1326                          * mappings */
1327                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1328                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1329                                 /* new mapping is not a subset of the region */
1330                                 if (!(capabilities & NOMMU_MAP_DIRECT))
1331                                         goto sharing_violation;
1332                                 continue;
1333                         }
1334
1335                         /* we've found a region we can share */
1336                         pregion->vm_usage++;
1337                         vma->vm_region = pregion;
1338                         start = pregion->vm_start;
1339                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1340                         vma->vm_start = start;
1341                         vma->vm_end = start + len;
1342
1343                         if (pregion->vm_flags & VM_MAPPED_COPY)
1344                                 vma->vm_flags |= VM_MAPPED_COPY;
1345                         else {
1346                                 ret = do_mmap_shared_file(vma);
1347                                 if (ret < 0) {
1348                                         vma->vm_region = NULL;
1349                                         vma->vm_start = 0;
1350                                         vma->vm_end = 0;
1351                                         pregion->vm_usage--;
1352                                         pregion = NULL;
1353                                         goto error_just_free;
1354                                 }
1355                         }
1356                         fput(region->vm_file);
1357                         kmem_cache_free(vm_region_jar, region);
1358                         region = pregion;
1359                         result = start;
1360                         goto share;
1361                 }
1362
1363                 /* obtain the address at which to make a shared mapping
1364                  * - this is the hook for quasi-memory character devices to
1365                  *   tell us the location of a shared mapping
1366                  */
1367                 if (capabilities & NOMMU_MAP_DIRECT) {
1368                         addr = file->f_op->get_unmapped_area(file, addr, len,
1369                                                              pgoff, flags);
1370                         if (IS_ERR_VALUE(addr)) {
1371                                 ret = addr;
1372                                 if (ret != -ENOSYS)
1373                                         goto error_just_free;
1374
1375                                 /* the driver refused to tell us where to site
1376                                  * the mapping so we'll have to attempt to copy
1377                                  * it */
1378                                 ret = -ENODEV;
1379                                 if (!(capabilities & NOMMU_MAP_COPY))
1380                                         goto error_just_free;
1381
1382                                 capabilities &= ~NOMMU_MAP_DIRECT;
1383                         } else {
1384                                 vma->vm_start = region->vm_start = addr;
1385                                 vma->vm_end = region->vm_end = addr + len;
1386                         }
1387                 }
1388         }
1389
1390         vma->vm_region = region;
1391
1392         /* set up the mapping
1393          * - the region is filled in if NOMMU_MAP_DIRECT is still set
1394          */
1395         if (file && vma->vm_flags & VM_SHARED)
1396                 ret = do_mmap_shared_file(vma);
1397         else
1398                 ret = do_mmap_private(vma, region, len, capabilities);
1399         if (ret < 0)
1400                 goto error_just_free;
1401         add_nommu_region(region);
1402
1403         /* clear anonymous mappings that don't ask for uninitialized data */
1404         if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1405                 memset((void *)region->vm_start, 0,
1406                        region->vm_end - region->vm_start);
1407
1408         /* okay... we have a mapping; now we have to register it */
1409         result = vma->vm_start;
1410
1411         current->mm->total_vm += len >> PAGE_SHIFT;
1412
1413 share:
1414         add_vma_to_mm(current->mm, vma);
1415
1416         /* we flush the region from the icache only when the first executable
1417          * mapping of it is made  */
1418         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1419                 flush_icache_range(region->vm_start, region->vm_end);
1420                 region->vm_icache_flushed = true;
1421         }
1422
1423         up_write(&nommu_region_sem);
1424
1425         return result;
1426
1427 error_just_free:
1428         up_write(&nommu_region_sem);
1429 error:
1430         if (region->vm_file)
1431                 fput(region->vm_file);
1432         kmem_cache_free(vm_region_jar, region);
1433         if (vma->vm_file)
1434                 fput(vma->vm_file);
1435         kmem_cache_free(vm_area_cachep, vma);
1436         return ret;
1437
1438 sharing_violation:
1439         up_write(&nommu_region_sem);
1440         pr_warn("Attempt to share mismatched mappings\n");
1441         ret = -EINVAL;
1442         goto error;
1443
1444 error_getting_vma:
1445         kmem_cache_free(vm_region_jar, region);
1446         pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1447                         len, current->pid);
1448         show_free_areas(0);
1449         return -ENOMEM;
1450
1451 error_getting_region:
1452         pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1453                         len, current->pid);
1454         show_free_areas(0);
1455         return -ENOMEM;
1456 }
1457
1458 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1459                 unsigned long, prot, unsigned long, flags,
1460                 unsigned long, fd, unsigned long, pgoff)
1461 {
1462         struct file *file = NULL;
1463         unsigned long retval = -EBADF;
1464
1465         audit_mmap_fd(fd, flags);
1466         if (!(flags & MAP_ANONYMOUS)) {
1467                 file = fget(fd);
1468                 if (!file)
1469                         goto out;
1470         }
1471
1472         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1473
1474         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1475
1476         if (file)
1477                 fput(file);
1478 out:
1479         return retval;
1480 }
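
/*
 * Illustrative note (not part of the original source): userspace reaches
 * this entry point through the normal mmap() wrapper.  Since do_mmap()
 * above discards the address hint, the mapping address must always be
 * taken from the return value:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (p != MAP_FAILED)
 *		;	// use p; the hint passed in was ignored
 */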
1481
1482 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1483 struct mmap_arg_struct {
1484         unsigned long addr;
1485         unsigned long len;
1486         unsigned long prot;
1487         unsigned long flags;
1488         unsigned long fd;
1489         unsigned long offset;
1490 };
1491
1492 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1493 {
1494         struct mmap_arg_struct a;
1495
1496         if (copy_from_user(&a, arg, sizeof(a)))
1497                 return -EFAULT;
1498         if (offset_in_page(a.offset))
1499                 return -EINVAL;
1500
1501         return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1502                               a.offset >> PAGE_SHIFT);
1503 }
1504 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1505
1506 /*
1507  * split a vma into two pieces at address 'addr'; a new vma is allocated either
1508  * for the first part or the tail.
1509  */
1510 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1511               unsigned long addr, int new_below)
1512 {
1513         struct vm_area_struct *new;
1514         struct vm_region *region;
1515         unsigned long npages;
1516
1517         /* we're only permitted to split anonymous regions (these should have
1518          * only a single usage on the region) */
1519         if (vma->vm_file)
1520                 return -ENOMEM;
1521
1522         if (mm->map_count >= sysctl_max_map_count)
1523                 return -ENOMEM;
1524
1525         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1526         if (!region)
1527                 return -ENOMEM;
1528
1529         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1530         if (!new) {
1531                 kmem_cache_free(vm_region_jar, region);
1532                 return -ENOMEM;
1533         }
1534
1535         /* most fields are the same, copy all, and then fixup */
1536         *new = *vma;
1537         *region = *vma->vm_region;
1538         new->vm_region = region;
1539
1540         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1541
1542         if (new_below) {
1543                 region->vm_top = region->vm_end = new->vm_end = addr;
1544         } else {
1545                 region->vm_start = new->vm_start = addr;
1546                 region->vm_pgoff = new->vm_pgoff += npages;
1547         }
1548
1549         if (new->vm_ops && new->vm_ops->open)
1550                 new->vm_ops->open(new);
1551
1552         delete_vma_from_mm(vma);
1553         down_write(&nommu_region_sem);
1554         delete_nommu_region(vma->vm_region);
1555         if (new_below) {
1556                 vma->vm_region->vm_start = vma->vm_start = addr;
1557                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1558         } else {
1559                 vma->vm_region->vm_end = vma->vm_end = addr;
1560                 vma->vm_region->vm_top = addr;
1561         }
1562         add_nommu_region(vma->vm_region);
1563         add_nommu_region(new->vm_region);
1564         up_write(&nommu_region_sem);
1565         add_vma_to_mm(mm, vma);
1566         add_vma_to_mm(mm, new);
1567         return 0;
1568 }
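/*
 * In outline, split_vma() above clones both the VMA and its backing
 * vm_region, truncates one copy at 'addr' and advances the other, then
 * re-inserts both regions into the global region tree under
 * nommu_region_sem and both VMAs into the mm.  Only anonymous mappings may
 * be split here; file-backed VMAs are rejected with -ENOMEM before any
 * allocation is attempted.
 */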
1569
1570 /*
1571  * shrink a VMA by removing the specified chunk from either the beginning or
1572  * the end
1573  */
1574 static int shrink_vma(struct mm_struct *mm,
1575                       struct vm_area_struct *vma,
1576                       unsigned long from, unsigned long to)
1577 {
1578         struct vm_region *region;
1579
1580         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1581          * and list */
1582         delete_vma_from_mm(vma);
1583         if (from > vma->vm_start)
1584                 vma->vm_end = from;
1585         else
1586                 vma->vm_start = to;
1587         add_vma_to_mm(mm, vma);
1588
1589         /* cut the backing region down to size */
1590         region = vma->vm_region;
1591         BUG_ON(region->vm_usage != 1);
1592
1593         down_write(&nommu_region_sem);
1594         delete_nommu_region(region);
1595         if (from > region->vm_start) {
1596                 to = region->vm_top;
1597                 region->vm_top = region->vm_end = from;
1598         } else {
1599                 region->vm_start = to;
1600         }
1601         add_nommu_region(region);
1602         up_write(&nommu_region_sem);
1603
1604         free_page_series(from, to);
1605         return 0;
1606 }
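/*
 * Note on shrink_vma() above: the VMA is trimmed and re-sorted in the mm
 * first, then the backing region is cut down under nommu_region_sem, and
 * finally the span that is no longer covered is handed to
 * free_page_series() for release.  The BUG_ON(region->vm_usage != 1)
 * documents the caller's guarantee that the region is not shared -
 * do_munmap() only shrinks anonymous mappings.
 */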
1607
1608 /*
1609  * release a mapping
1610  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1611  *   VMA, though it need not cover the whole VMA
1612  */
1613 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1614 {
1615         struct vm_area_struct *vma;
1616         unsigned long end;
1617         int ret;
1618
1619         len = PAGE_ALIGN(len);
1620         if (len == 0)
1621                 return -EINVAL;
1622
1623         end = start + len;
1624
1625         /* find the first potentially overlapping VMA */
1626         vma = find_vma(mm, start);
1627         if (!vma) {
1628                 static int limit;
1629                 if (limit < 5) {
1630                         pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1631                                         current->pid, current->comm,
1632                                         start, start + len - 1);
1633                         limit++;
1634                 }
1635                 return -EINVAL;
1636         }
1637
1638         /* we're allowed to split an anonymous VMA but not a file-backed one */
1639         if (vma->vm_file) {
1640                 do {
1641                         if (start > vma->vm_start)
1642                                 return -EINVAL;
1643                         if (end == vma->vm_end)
1644                                 goto erase_whole_vma;
1645                         vma = vma->vm_next;
1646                 } while (vma);
1647                 return -EINVAL;
1648         } else {
1649                 /* the chunk must be a subset of the VMA found */
1650                 if (start == vma->vm_start && end == vma->vm_end)
1651                         goto erase_whole_vma;
1652                 if (start < vma->vm_start || end > vma->vm_end)
1653                         return -EINVAL;
1654                 if (offset_in_page(start))
1655                         return -EINVAL;
1656                 if (end != vma->vm_end && offset_in_page(end))
1657                         return -EINVAL;
1658                 if (start != vma->vm_start && end != vma->vm_end) {
1659                         ret = split_vma(mm, vma, start, 1);
1660                         if (ret < 0)
1661                                 return ret;
1662                 }
1663                 return shrink_vma(mm, vma, start, end);
1664         }
1665
1666 erase_whole_vma:
1667         delete_vma_from_mm(vma);
1668         delete_vma(mm, vma);
1669         return 0;
1670 }
1671 EXPORT_SYMBOL(do_munmap);
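/*
 * Illustrative behaviour of do_munmap() under NOMMU (a sketch, not an
 * exhaustive contract), assuming 'p' is an anonymous MAP_PRIVATE mapping of
 * four pages and 'q' is a file-backed mapping:
 *
 *	munmap(p, 4 * PAGE_SIZE);          - whole VMA: unmapped outright
 *	munmap(p + PAGE_SIZE, PAGE_SIZE);  - interior chunk of the anonymous
 *	                                     VMA: split_vma() then shrink_vma()
 *	munmap(q + PAGE_SIZE, PAGE_SIZE);  - partial unmap of the file-backed
 *	                                     VMA: -EINVAL
 */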
1672
1673 int vm_munmap(unsigned long addr, size_t len)
1674 {
1675         struct mm_struct *mm = current->mm;
1676         int ret;
1677
1678         down_write(&mm->mmap_sem);
1679         ret = do_munmap(mm, addr, len);
1680         up_write(&mm->mmap_sem);
1681         return ret;
1682 }
1683 EXPORT_SYMBOL(vm_munmap);
1684
1685 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1686 {
1687         return vm_munmap(addr, len);
1688 }
1689
1690 /*
1691  * release all the mappings made in a process's VM space
1692  */
1693 void exit_mmap(struct mm_struct *mm)
1694 {
1695         struct vm_area_struct *vma;
1696
1697         if (!mm)
1698                 return;
1699
1700         mm->total_vm = 0;
1701
1702         while ((vma = mm->mmap)) {
1703                 mm->mmap = vma->vm_next;
1704                 delete_vma_from_mm(vma);
1705                 delete_vma(mm, vma);
1706                 cond_resched();
1707         }
1708 }
1709
1710 unsigned long vm_brk(unsigned long addr, unsigned long len)
1711 {
1712         return -ENOMEM;
1713 }
1714
1715 /*
1716  * expand (or shrink) an existing mapping, potentially moving it at the same
1717  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1718  *
1719  * under NOMMU conditions, we only permit changing a mapping's size, and only
1720  * as long as it stays within the region allocated by do_mmap_private() and the
1721  * block is not shareable
1722  *
1723  * MREMAP_FIXED is not supported under NOMMU conditions
1724  */
1725 static unsigned long do_mremap(unsigned long addr,
1726                         unsigned long old_len, unsigned long new_len,
1727                         unsigned long flags, unsigned long new_addr)
1728 {
1729         struct vm_area_struct *vma;
1730
1731         /* insanity checks first */
1732         old_len = PAGE_ALIGN(old_len);
1733         new_len = PAGE_ALIGN(new_len);
1734         if (old_len == 0 || new_len == 0)
1735                 return (unsigned long) -EINVAL;
1736
1737         if (offset_in_page(addr))
1738                 return -EINVAL;
1739
1740         if (flags & MREMAP_FIXED && new_addr != addr)
1741                 return (unsigned long) -EINVAL;
1742
1743         vma = find_vma_exact(current->mm, addr, old_len);
1744         if (!vma)
1745                 return (unsigned long) -EINVAL;
1746
1747         if (vma->vm_end != vma->vm_start + old_len)
1748                 return (unsigned long) -EFAULT;
1749
1750         if (vma->vm_flags & VM_MAYSHARE)
1751                 return (unsigned long) -EPERM;
1752
1753         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1754                 return (unsigned long) -ENOMEM;
1755
1756         /* all checks complete - do it */
1757         vma->vm_end = vma->vm_start + new_len;
1758         return vma->vm_start;
1759 }
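/*
 * A minimal usage sketch for the NOMMU mremap() restrictions enforced above
 * (assuming 'p' came from a private, non-shareable mapping whose backing
 * region was over-allocated by do_mmap_private()):
 *
 *	p2 = mremap(p, old_len, new_len, 0);
 *
 * This succeeds only while new_len still fits inside the original
 * vm_region, and p2 is always equal to p - the mapping is never moved.
 * MREMAP_MAYMOVE is effectively ignored and MREMAP_FIXED must keep
 * new_addr == addr.
 */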
1760
1761 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1762                 unsigned long, new_len, unsigned long, flags,
1763                 unsigned long, new_addr)
1764 {
1765         unsigned long ret;
1766
1767         down_write(&current->mm->mmap_sem);
1768         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1769         up_write(&current->mm->mmap_sem);
1770         return ret;
1771 }
1772
1773 struct page *follow_page_mask(struct vm_area_struct *vma,
1774                               unsigned long address, unsigned int flags,
1775                               unsigned int *page_mask)
1776 {
1777         *page_mask = 0;
1778         return NULL;
1779 }
1780
1781 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1782                 unsigned long pfn, unsigned long size, pgprot_t prot)
1783 {
1784         if (addr != (pfn << PAGE_SHIFT))
1785                 return -EINVAL;
1786
1787         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1788         return 0;
1789 }
1790 EXPORT_SYMBOL(remap_pfn_range);
1791
1792 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1793 {
1794         unsigned long pfn = start >> PAGE_SHIFT;
1795         unsigned long vm_len = vma->vm_end - vma->vm_start;
1796
1797         pfn += vma->vm_pgoff;
1798         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1799 }
1800 EXPORT_SYMBOL(vm_iomap_memory);
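/*
 * Note on the two helpers above: with no page tables to populate,
 * remap_pfn_range() only checks that the requested virtual address already
 * equals the physical address (addr == pfn << PAGE_SHIFT) and marks the VMA
 * VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP, while vm_iomap_memory()
 * simply forwards the whole VMA to io_remap_pfn_range() at the
 * caller-supplied physical start plus vm_pgoff.
 */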
1801
1802 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1803                         unsigned long pgoff)
1804 {
1805         unsigned int size = vma->vm_end - vma->vm_start;
1806
1807         if (!(vma->vm_flags & VM_USERMAP))
1808                 return -EINVAL;
1809
1810         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1811         vma->vm_end = vma->vm_start + size;
1812
1813         return 0;
1814 }
1815 EXPORT_SYMBOL(remap_vmalloc_range);
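/*
 * Note on remap_vmalloc_range() above: because kernel and user share a
 * single flat address space here, "remapping" a vmalloc buffer into a
 * VM_USERMAP VMA amounts to repointing vm_start/vm_end at the buffer itself
 * (offset by pgoff << PAGE_SHIFT); no pages are copied or inserted.
 */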
1816
1817 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1818         unsigned long len, unsigned long pgoff, unsigned long flags)
1819 {
1820         return -ENOMEM;
1821 }
1822
1823 void unmap_mapping_range(struct address_space *mapping,
1824                          loff_t const holebegin, loff_t const holelen,
1825                          int even_cows)
1826 {
1827 }
1828 EXPORT_SYMBOL(unmap_mapping_range);
1829
1830 /*
1831  * Check that a process has enough memory to allocate a new virtual
1832  * mapping. 0 means there is enough memory for the allocation to
1833  * succeed and -ENOMEM implies there is not.
1834  *
1835  * We currently support three overcommit policies, which are set via the
1836  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1837  *
1838  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1839  * Additional code 2002 Jul 20 by Robert Love.
1840  *
1841  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1842  *
1843  * Note this is a helper function intended to be used by LSMs which
1844  * wish to use this logic.
1845  */
1846 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1847 {
1848         long free, allowed, reserve;
1849
1850         vm_acct_memory(pages);
1851
1852         /*
1853          * Sometimes we want to use more memory than we have
1854          */
1855         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1856                 return 0;
1857
1858         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1859                 free = global_page_state(NR_FREE_PAGES);
1860                 free += global_page_state(NR_FILE_PAGES);
1861
1862                 /*
1863                  * shmem pages shouldn't be counted as free in this
1864                  * case, they can't be purged, only swapped out, and
1865                  * that won't affect the overall amount of available
1866                  * memory in the system.
1867                  */
1868                 free -= global_page_state(NR_SHMEM);
1869
1870                 free += get_nr_swap_pages();
1871
1872                 /*
1873                  * Any slabs which are created with the
1874                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1875                  * which are reclaimable, under pressure.  The dentry
1876                  * cache and most inode caches should fall into this
1877                  * cache and most inode caches should fall into this category.
1878                 free += global_page_state(NR_SLAB_RECLAIMABLE);
1879
1880                 /*
1881                  * Leave out the reserved pages; they cannot be used for anonymous mappings.
1882                  */
1883                 if (free <= totalreserve_pages)
1884                         goto error;
1885                 else
1886                         free -= totalreserve_pages;
1887
1888                 /*
1889                  * Reserve some for root
1890                  */
1891                 if (!cap_sys_admin)
1892                         free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1893
1894                 if (free > pages)
1895                         return 0;
1896
1897                 goto error;
1898         }
1899
1900         allowed = vm_commit_limit();
1901         /*
1902          * Reserve some for root
1903          */
1904         if (!cap_sys_admin)
1905                 allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1906
1907         /*
1908          * Don't let a single process grow so big a user can't recover
1909          */
1910         if (mm) {
1911                 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
1912                 allowed -= min_t(long, mm->total_vm / 32, reserve);
1913         }
1914
1915         if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1916                 return 0;
1917
1918 error:
1919         vm_unacct_memory(pages);
1920
1921         return -ENOMEM;
1922 }
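/*
 * Worked example for the strict (OVERCOMMIT_NEVER) path above, with
 * illustrative numbers only: if vm_commit_limit() works out to 1,000,000
 * pages, the caller lacks CAP_SYS_ADMIN, the task's mm->total_vm is 64,000
 * pages, and pages are 4KiB (so PAGE_SHIFT - 10 == 2), then with the
 * default 8MB admin reserve and 128MB user reserve:
 *
 *	allowed  = 1,000,000
 *	allowed -= 8192 KiB >> 2                      (2,048 pages)
 *	allowed -= min(64,000 / 32, 131072 KiB >> 2)  (min(2,000, 32,768))
 *	         = 1,000,000 - 2,048 - 2,000 = 995,952 pages
 *
 * and the request is granted while vm_committed_as stays below that figure.
 */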
1923
1924 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1925 {
1926         BUG();
1927         return 0;
1928 }
1929 EXPORT_SYMBOL(filemap_fault);
1930
1931 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
1932 {
1933         BUG();
1934 }
1935 EXPORT_SYMBOL(filemap_map_pages);
1936
1937 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1938                 unsigned long addr, void *buf, int len, int write)
1939 {
1940         struct vm_area_struct *vma;
1941
1942         down_read(&mm->mmap_sem);
1943
1944         /* the access must start within one of the target process's mappings */
1945         vma = find_vma(mm, addr);
1946         if (vma) {
1947                 /* don't overrun this mapping */
1948                 if (addr + len >= vma->vm_end)
1949                         len = vma->vm_end - addr;
1950
1951                 /* only read or write mappings where it is permitted */
1952                 if (write && vma->vm_flags & VM_MAYWRITE)
1953                         copy_to_user_page(vma, NULL, addr,
1954                                          (void *) addr, buf, len);
1955                 else if (!write && vma->vm_flags & VM_MAYREAD)
1956                         copy_from_user_page(vma, NULL, addr,
1957                                             buf, (void *) addr, len);
1958                 else
1959                         len = 0;
1960         } else {
1961                 len = 0;
1962         }
1963
1964         up_read(&mm->mmap_sem);
1965
1966         return len;
1967 }
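/*
 * Note on __access_remote_vm() above: with a shared flat address space the
 * "remote" access degenerates into a bounded copy - the length is clamped to
 * the end of the VMA containing 'addr' and the data is moved with
 * copy_to_user_page()/copy_from_user_page(), gated on VM_MAYWRITE or
 * VM_MAYREAD respectively.  An address outside any mapping transfers zero
 * bytes rather than faulting.
 */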
1968
1969 /**
1970  * access_remote_vm - access another process' address space
1971  * @mm:         the mm_struct of the target address space
1972  * @addr:       start address to access
1973  * @buf:        source or destination buffer
1974  * @len:        number of bytes to transfer
1975  * @write:      whether the access is a write
1976  *
1977  * The caller must hold a reference on @mm.
1978  */
1979 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1980                 void *buf, int len, int write)
1981 {
1982         return __access_remote_vm(NULL, mm, addr, buf, len, write);
1983 }
1984
1985 /*
1986  * Access another process' address space.
1987  * - source/target buffer must be kernel space
1988  */
1989 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1990 {
1991         struct mm_struct *mm;
1992
1993         if (addr + len < addr)
1994                 return 0;
1995
1996         mm = get_task_mm(tsk);
1997         if (!mm)
1998                 return 0;
1999
2000         len = __access_remote_vm(tsk, mm, addr, buf, len, write);
2001
2002         mmput(mm);
2003         return len;
2004 }
2005
2006 /**
2007  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
2008  * @inode: The inode to check
2009  * @size: The current filesize of the inode
2010  * @newsize: The proposed filesize of the inode
2011  *
2012  * Check the shared mappings on an inode on behalf of a shrinking truncate to
2013  * make sure that any outstanding VMAs aren't broken and then shrink the
2014  * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
2015  * automatically grant mappings that are too large.
2016  */
2017 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
2018                                 size_t newsize)
2019 {
2020         struct vm_area_struct *vma;
2021         struct vm_region *region;
2022         pgoff_t low, high;
2023         size_t r_size, r_top;
2024
2025         low = newsize >> PAGE_SHIFT;
2026         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2027
2028         down_write(&nommu_region_sem);
2029         i_mmap_lock_read(inode->i_mapping);
2030
2031         /* search for VMAs that fall within the dead zone */
2032         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
2033                 /* found one - only interested if it's shared out of the page
2034                  * cache */
2035                 if (vma->vm_flags & VM_SHARED) {
2036                         i_mmap_unlock_read(inode->i_mapping);
2037                         up_write(&nommu_region_sem);
2038                         return -ETXTBSY; /* not quite true, but near enough */
2039                 }
2040         }
2041
2042         /* reduce any regions that overlap the dead zone - if in existence,
2043          * these will be pointed to by VMAs that don't overlap the dead zone
2044          *
2045          * we don't check for any regions that start beyond the EOF as there
2046          * shouldn't be any
2047          */
2048         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
2049                 if (!(vma->vm_flags & VM_SHARED))
2050                         continue;
2051
2052                 region = vma->vm_region;
2053                 r_size = region->vm_top - region->vm_start;
2054                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2055
2056                 if (r_top > newsize) {
2057                         region->vm_top -= r_top - newsize;
2058                         if (region->vm_end > region->vm_top)
2059                                 region->vm_end = region->vm_top;
2060                 }
2061         }
2062
2063         i_mmap_unlock_read(inode->i_mapping);
2064         up_write(&nommu_region_sem);
2065         return 0;
2066 }
2067
2068 /*
2069  * Initialise sysctl_user_reserve_kbytes.
2070  *
2071  * This is intended to prevent a user from starting a single memory-hogging
2072  * process so large that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
2073  * mode.
2074  *
2075  * The default value is min(3% of free memory, 128MB)
2076  * 128MB is enough to recover with sshd/login, bash, and top/kill.
2077  */
2078 static int __meminit init_user_reserve(void)
2079 {
2080         unsigned long free_kbytes;
2081
2082         free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
2083
2084         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
2085         return 0;
2086 }
2087 subsys_initcall(init_user_reserve);
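/*
 * free_kbytes / 32 is roughly 3% of free memory, and 1UL << 17 KiB is 128MiB,
 * so, for example, a board with 512MiB free at boot gets
 * min(524288 / 32, 131072) = 16,384KiB (16MiB) reserved for the user, while
 * anything with more than 4GiB free is capped at 128MiB.
 */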
2088
2089 /*
2090  * Initialise sysctl_admin_reserve_kbytes.
2091  *
2092  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
2093  * to log in and kill a memory hogging process.
2094  *
2095  * Systems with more than 256MB will reserve 8MB, enough to recover
2096  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
2097  * only reserve 3% of free pages by default.
2098  */
2099 static int __meminit init_admin_reserve(void)
2100 {
2101         unsigned long free_kbytes;
2102
2103         free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
2104
2105         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
2106         return 0;
2107 }
2108 subsys_initcall(init_admin_reserve);
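/*
 * Likewise, 1UL << 13 KiB is 8MiB, and free_kbytes / 32 crosses that cap at
 * 256MiB of free memory - which is why the comment above says systems larger
 * than 256MB always reserve 8MB while smaller ones reserve ~3% of free pages.
 */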
2109
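/*
 * Compatibility wrappers: the older eight/seven-argument get_user_pages*()
 * entry points below ignore the explicit (tsk, mm) pair and forward to the
 * corresponding current-mm variants (get_user_pages6() and friends).
 */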
2110 long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
2111                      unsigned long start, unsigned long nr_pages,
2112                      int write, int force, struct page **pages,
2113                      struct vm_area_struct **vmas)
2114 {
2115         return get_user_pages6(start, nr_pages, write, force, pages, vmas);
2116 }
2117 EXPORT_SYMBOL(get_user_pages8);
2118
2119 long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
2120                             unsigned long start, unsigned long nr_pages,
2121                             int write, int force, struct page **pages,
2122                             int *locked)
2123 {
2124         return get_user_pages_locked6(start, nr_pages, write,
2125                                       force, pages, locked);
2126 }
2127 EXPORT_SYMBOL(get_user_pages_locked8);
2128
2129 long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
2130                               unsigned long start, unsigned long nr_pages,
2131                               int write, int force, struct page **pages)
2132 {
2133         return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
2134 }
2135 EXPORT_SYMBOL(get_user_pages_unlocked7);
2136