/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/current.h>
#include <asm/page.h>
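
/*
 * Walk the current task's page tables for a userland write destination.
 * If the page is present, young, writable and dirty (i.e. a straight
 * memcpy() to it cannot fault), return 1 with the relevant page table
 * lock held so the mapping cannot change underneath us; otherwise
 * return 0 so the caller can fault the page in first.
 */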
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;
	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
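
/*
 * memcpy()-based copy_to_user(): pin each destination page with
 * pin_page_for_write() so the access cannot fault, copy up to the end
 * of that page under the page table lock, then move on. Pages that are
 * not yet writable are touched with __put_user() to fault them in.
 */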
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;
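
	/* KERNEL_DS: the "user" pointer is really a kernel address */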
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}
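
		/* copy no further than the end of the current destination page */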
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;
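
		/* grant ourselves user-space write access for the plain memcpy() */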
		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(to, from, n);
	}
	return n;
}
EXPORT_SYMBOL(arm_copy_to_user);
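
/*
 * memset()-based clear_user(): same page pinning strategy as
 * __copy_to_user_memcpy() above, but zeroing the destination instead.
 */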
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}
EXPORT_SYMBOL(arm_clear_user);

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurement on concerned targets didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_treshold);

#endif