/*
 * arch/x86/vdso/vma.c
 *
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

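/*
 * Fill in the page array backing a vDSO image and apply any alternative
 * instructions, so the image is ready to be mapped into user processes.
 */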
void __init init_vdso_image(const struct vdso_image *image)
{
        int i;
        int npages = (image->size) / PAGE_SIZE;

        BUG_ON(image->size % PAGE_SIZE != 0);
        for (i = 0; i < npages; i++)
                image->text_mapping.pages[i] =
                        virt_to_page(image->data + i*PAGE_SIZE);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

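/* Initialize the 64-bit vDSO image (and the x32 image when enabled) at boot. */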
#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
        /* This loses some more bits than a modulo, but is cheaper */
        offset = get_random_int() & (PTRS_PER_PTE - 1);
        addr = start + (offset << PAGE_SHIFT);
        if (addr >= end)
                addr = end;

        /*
         * Page-align it here so that get_unmapped_area doesn't
         * align it wrongly again to the next page. addr can come in 4K
         * unaligned here as a result of stack start randomization.
         */
        addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);

        return addr;
#endif
}

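/*
 * Map a vDSO image into the current process: the image text is mapped
 * first, followed by a "[vvar]" special mapping that exposes the vvar
 * page and, when available, the HPET registers.  On any failure the
 * mm's vdso pointer is cleared again.
 */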
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret = 0;
        static struct page *no_pages[] = {NULL};
        static struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
                .pages = no_pages,
        };

        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
                                 image->sym_end_mapping);
        } else {
                addr = 0;
        }

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        current->mm->context.vdso = (void __user *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       addr,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &image->text_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr + image->size,
                                       image->sym_end_mapping - image->size,
                                       VM_READ,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        if (image->sym_vvar_page)
                ret = remap_pfn_range(vma,
                                      addr + image->sym_vvar_page,
                                      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                                      PAGE_SIZE,
                                      PAGE_READONLY);

        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
        if (hpet_address && image->sym_hpet_page) {
                ret = io_remap_pfn_range(vma,
                        addr + image->sym_hpet_page,
                        hpet_address >> PAGE_SHIFT,
                        PAGE_SIZE,
                        pgprot_noncached(PAGE_READONLY));

                if (ret)
                        goto up_fail;
        }
#endif

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);
        return ret;
}

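/*
 * Map the selected 32-bit vDSO (native or compat) and, if the image
 * provides it, record the sysenter return address for this thread.
 */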
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
        int ret;

        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        ret = map_vdso(selected_vdso32, false);
        if (ret)
                return ret;

        if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
                current_thread_info()->sysenter_return =
                        current->mm->context.vdso +
                        selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

        return 0;
}
#endif

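/*
 * Called from the binary loader at execve() time to map the vDSO for
 * the new process image.
 */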
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso(&vdso_image_x32, true);
        }
#endif

        return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

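/* "vdso=" boot parameter: zero disables the 64-bit vDSO, non-zero enables it. */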
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif