arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

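/*
 * Queue a host mmap of the range [virt, virt + len) backed by the physical
 * page at phys.  The request is merged into the previous entry in ops[] when
 * it simply extends that mapping; if ops[] is already full, the pending
 * operations are submitted through do_ops() before the new entry is added.
 */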
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    int r, int w, int x, struct host_vm_op *ops, int *index,
                    int last_filled, union mm_context *mmu, void **flush,
                    int (*do_ops)(union mm_context *, struct host_vm_op *,
                                  int, int, void **))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if(*index != -1){
                last = &ops[*index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type    = MMAP,
                                                .u = { .mmap = {
                                                       .addr    = virt,
                                                       .len     = len,
                                                       .r       = r,
                                                       .w       = w,
                                                       .x       = x,
                                                       .fd      = fd,
                                                       .offset  = offset }
                           } });
        return ret;
}

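/*
 * Queue a host munmap of [addr, addr + len), extending the previous entry in
 * ops[] when the two ranges are contiguous and flushing the array through
 * do_ops() when it is full.
 */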
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_op *ops, int *index, int last_filled,
                      union mm_context *mmu, void **flush,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type    = MUNMAP,
                                               .u = { .munmap = {
                                                        .addr   = addr,
                                                        .len    = len } } });
        return ret;
}

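/*
 * Queue a host mprotect of [addr, addr + len) with the given r/w/x
 * permissions, merging with the previous entry when both the range and the
 * permissions line up, and flushing ops[] through do_ops() when it is full.
 */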
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                        int x, struct host_vm_op *ops, int *index,
                        int last_filled, union mm_context *mmu, void **flush,
                        int (*do_ops)(union mm_context *, struct host_vm_op *,
                                      int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                   (last->u.mprotect.x == x)){
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type    = MPROTECT,
                                               .u = { .mprotect = {
                                                       .addr    = addr,
                                                       .len     = len,
                                                       .r       = r,
                                                       .w       = w,
                                                       .x       = x } } });
        return ret;
}

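/* Advance n to the next inc-aligned boundary; inc must be a power of two. */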
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

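/*
 * Walk the PTEs of one PMD over [addr, end) and queue the host operations
 * needed to bring the host address space into line with the page tables:
 * mmap for present pages that are new (or when force is set), munmap for
 * pages that have gone away, and mprotect when only the permissions changed.
 * The accessed and dirty bits are folded into the host permissions so that
 * the next access or write faults and lets the bits be set.
 */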
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pte_t *pte;
        int r, w, x, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte)) {
                        w = 0;
                }
                if(force || pte_newpage(*pte)){
                        if(pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, r, w, x, ops,
                                               op_index, last_op, mmu, flush,
                                               do_ops);
                        else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                              last_op, mmu, flush, do_ops);
                }
                else if(pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
                                           op_index, last_op, mmu, flush,
                                           do_ops);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
        return ret;
}

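/*
 * Walk the PMDs of one PUD, unmapping ranges whose PMD is not present (when
 * newly cleared or when force is set) and descending into
 * update_pte_range() otherwise.
 */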
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if(!pmd_present(*pmd)){
                        if(force || pmd_newpage(*pmd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pmd_mkuptodate(*pmd);
                        }
                }
                else ret = update_pte_range(pmd, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pmd++, addr = next, ((addr != end) && !ret));
        return ret;
}

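/*
 * Walk the PUDs of one PGD; same logic as update_pmd_range(), one level up.
 */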
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if(!pud_present(*pud)){
                        if(force || pud_newpage(*pud)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pud_mkuptodate(*pud);
                        }
                }
                else ret = update_pmd_range(pud, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pud++, addr = next, ((addr != end) && !ret));
        return ret;
}

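/*
 * Bring the host mappings for [start_addr, end_addr) of mm into line with
 * its page tables.  Operations are batched in ops[] and submitted through
 * do_ops(); the final call passes 1 as the "finished" argument so the flush
 * can be completed.  On failure the current process is killed, since its
 * address space can no longer be trusted.
 */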
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        pgd_t *pgd;
        union mm_context *mmu = &mm->context;
        struct host_vm_op ops[1];
        unsigned long addr = start_addr, next;
        int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
        void *flush = NULL;
        unsigned long long start_time, end_time;

        start_time = os_nsecs();
        ops[0].type = NONE;
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if(!pgd_present(*pgd)){
                        if (force || pgd_newpage(*pgd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pgd_mkuptodate(*pgd);
                        }
                }
                else ret = update_pud_range(pgd, addr, next, ops, last_op,
                                            &op_index, force, mmu, &flush,
                                            do_ops);
        } while (pgd++, addr = next, ((addr != end_addr) && !ret));
        end_time = os_nsecs();
        printk("total flush time - %Ld nsecs\n", end_time - start_time);

        if(!ret)
                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

        /* This is not an else because ret is modified above */
        if(ret) {
                printk("fix_range_common: failed, killing current process\n");
                force_sig(SIGKILL, current);
        }
}

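/*
 * Flush the kernel mappings for [start, end): walk init_mm's page tables,
 * unmapping any region whose upper-level entry is absent and marked as newly
 * changed, and remapping or reprotecting individual pages as their PTEs
 * dictate.  Returns non-zero if anything on the host had to change.
 */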
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}

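/*
 * Out-of-line wrappers around the page table walking macros, for callers
 * that cannot use the inline versions directly.
 */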
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}

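/*
 * The flush_tlb_* entry points below either dispatch to the tt- or skas-mode
 * implementation via CHOOSE_MODE, or reduce to a call that does.
 */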
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        address &= PAGE_MASK;
        flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}