/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, int prot_numa, spinlock_t **ptl)
{
        pte_t *pte;
        spinlock_t *pmdl;

        /* !prot_numa is protected by mmap_sem held for write */
        if (!prot_numa)
                return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

        pmdl = pmd_lock(vma->vm_mm, pmd);
        if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
                spin_unlock(pmdl);
                return NULL;
        }

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
        spin_unlock(pmdl);
        return pte;
}

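/*
 * Change the protection on the ptes mapped by one pmd over [addr, end);
 * writable migration entries are downgraded to read-only.  Returns the
 * number of entries updated.
 */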
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;

        pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
        if (!pte)
                return 0;

        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
                        bool preserve_write = prot_numa && pte_write(oldpte);

                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
                        if (prot_numa) {
                                struct page *page;

                                page = vm_normal_page(vma, addr, oldpte);
                                if (!page || PageKsm(page))
                                        continue;

                                /* Avoid TLB flush if possible */
                                if (pte_protnone(oldpte))
                                        continue;
                        }

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
                        if (preserve_write)
                                ptent = pte_mkwrite(ptent);

                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
                                        (pte_soft_dirty(ptent) ||
                                         !(vma->vm_flags & VM_SOFTDIRTY))) {
                                ptent = pte_mkwrite(ptent);
                        }
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                        pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                set_pte_at(mm, addr, pte, newpte);

                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}

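/*
 * Walk the pmds under one pud, handling transparent huge pmds (either
 * updating them in place or splitting them) and calling
 * change_pte_range() for the rest.  Returns the number of entries
 * updated.
 */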
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pmd_t *pmd;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;
        unsigned long mni_start = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);
                if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
                                && pmd_none_or_clear_bad(pmd))
                        continue;

                /* invoke the mmu notifier if the pmd is populated */
                if (!mni_start) {
                        mni_start = addr;
                        mmu_notifier_invalidate_range_start(mm, mni_start, end);
                }

                if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
                                split_huge_pmd(vma, pmd, addr);
                                if (pmd_trans_unstable(pmd))
                                        continue;
                        } else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }

                                        /* huge pmd was handled */
                                        continue;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
                pages += this_pages;
        } while (pmd++, addr = next, addr != end);

        if (mni_start)
                mmu_notifier_invalidate_range_end(mm, mni_start, end);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}

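/*
 * Walk the puds under one pgd entry and update every populated pmd
 * range.  Returns the number of entries updated.
 */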
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);

        return pages;
}

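/*
 * Walk the full page-table hierarchy for [addr, end), apply newprot,
 * and flush the TLB for the range only if any entries were changed.
 */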
static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        clear_tlb_flush_pending(mm);

        return pages;
}

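/*
 * Apply newprot across [start, end) of a single VMA, dispatching to
 * the hugetlb helper for hugetlb mappings.  Returns the number of
 * entries updated.
 */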
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
                       int dirty_accountable, int prot_numa)
{
        unsigned long pages;

        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

        return pages;
}

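/*
 * Update a single VMA so that [start, end) carries newflags: merge or
 * split the VMA as required, adjust memory accounting, and rewrite the
 * page table protections.
 */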
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again.  hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                /* Check space limits when area turns into data. */
                if (!may_expand_vm(mm, newflags, nrpages) &&
                                may_expand_vm(mm, oldflags, nrpages))
                        return -ENOMEM;
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
                           vma->vm_userfaultfd_ctx);
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        dirty_accountable = vma_wants_writenotify(vma);
        vma_set_page_prot(vma);

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);

        /*
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
         * fault on access.
         */
        if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
                        (newflags & VM_WRITE)) {
                populate_vma_page_range(vma, start, end, NULL);
        }

        vm_stat_account(mm, oldflags, -nrpages);
        vm_stat_account(mm, newflags, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

/*
 * Common implementation of mprotect() and pkey_mprotect();
 * pkey == -1 when doing a legacy mprotect().
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
                unsigned long prot, int pkey)
{
        unsigned long nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
                                (prot & PROT_READ);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;

        if (down_write_killable(&current->mm->mmap_sem))
                return -EINTR;

        /*
         * If userspace did not allocate the pkey, do not let
         * them use it here.
         */
        error = -EINVAL;
        if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
                goto out;

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long mask_off_old_flags;
                unsigned long newflags;
                int new_vma_pkey;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                /* Does the application expect PROT_READ to imply PROT_EXEC? */
                if (rier && (vma->vm_flags & VM_MAYEXEC))
                        prot |= PROT_EXEC;

                /*
                 * Each mprotect() call explicitly passes r/w/x permissions.
                 * If a permission is not passed to mprotect(), it must be
                 * cleared from the VMA.
                 */
                mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
                                        ARCH_VM_PKEY_FLAGS;

                new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
                newflags = calc_vm_prot_bits(prot, new_vma_pkey);
                newflags |= (vma->vm_flags & ~mask_off_old_flags);

                /* newflags >> 4 shifts VM_MAY* in place of VM_* */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
                prot = reqprot;
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}

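/*
 * Legacy mprotect(): change protections without assigning a
 * protection key.
 */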
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        return do_mprotect_pkey(start, len, prot, -1);
}

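/*
 * pkey_mprotect(): like mprotect(), but also associates the given
 * protection key with the range.
 */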
SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
                unsigned long, prot, int, pkey)
{
        return do_mprotect_pkey(start, len, prot, pkey);
}

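/*
 * Allocate a protection key for this mm and set its initial access
 * rights.  Returns the key number on success or a negative error.
 */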
SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
        int pkey;
        int ret;

        /* No flags supported yet. */
        if (flags)
                return -EINVAL;
        /* check for unsupported init values */
        if (init_val & ~PKEY_ACCESS_MASK)
                return -EINVAL;

        down_write(&current->mm->mmap_sem);
        pkey = mm_pkey_alloc(current->mm);

        ret = -ENOSPC;
        if (pkey == -1)
                goto out;

        ret = arch_set_user_pkey_access(current, pkey, init_val);
        if (ret) {
                mm_pkey_free(current->mm, pkey);
                goto out;
        }
        ret = pkey;
out:
        up_write(&current->mm->mmap_sem);
        return ret;
}

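/*
 * Free a protection key previously allocated with pkey_alloc().
 */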
SYSCALL_DEFINE1(pkey_free, int, pkey)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        ret = mm_pkey_free(current->mm, pkey);
        up_write(&current->mm->mmap_sem);

        /*
         * We could provide warnings or errors if any VMA still
         * has the pkey set here.
         */
        return ret;
}