/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static int mm_counter(struct page *page)
{
	return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
}

static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page;
	swp_entry_t entry;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, mm_counter(page));
			page_remove_rmap(page);
			page_cache_release(page);
		}
	} else {	/* zap_pte() is not called when pte_none() */
		if (!pte_file(pte)) {
			update_hiwater_rss(mm);
			entry = pte_to_swp_entry(pte);
			if (non_swap_entry(entry)) {
				if (is_migration_entry(entry)) {
					page = migration_entry_to_page(entry);
					dec_mm_counter(mm, mm_counter(page));
				}
			} else {
				free_swap_and_cache(entry);
				dec_mm_counter(mm, MM_SWAPENTS);
			}
		}
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte, ptfile;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	ptfile = pgoff_to_pte(pgoff);

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
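	 *
	 * Illustrative invariant (a sketch, not code that runs here): the
	 * encoding must round-trip so the fault path can recover the offset:
	 *
	 *	pte_file(pgoff_to_pte(pgoff))		  is true
	 *	pte_to_pgoff(pgoff_to_pte(pgoff)) == pgoff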
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);
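
/*
 * Filesystems opt in to nonlinear remapping by pointing ->remap_pages at
 * the helper above. An illustrative (abridged, hypothetical) example of
 * such a vm_operations_struct:
 *
 *	static const struct vm_operations_struct foo_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 */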

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
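 *
 * Illustrative userspace sketch (a hypothetical four-page "data.bin",
 * 4096-byte pages assumed, error handling omitted):
 *
 *	int fd = open("data.bin", O_RDWR);
 *	char *win = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 *	remap_file_pages(win, 4096, 0, 3, 0);
 *
 * After the call the first page of the window is backed by file page 3
 * rather than file page 0, and no new vma is created.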
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's PTEs? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);
			/* mmap_region may free vma; grab the info now */
			vm_flags = vma->vm_flags;

			addr = mmap_region(file, start, size, vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out_freed;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_mlocked flag for over-mapped range
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
out_freed:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}