/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
{
        ulong kstack_base = (ulong) kbt->task->stack;
        if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
                return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
        return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address in the kernel text (code) region? */
static int in_kernel_text(VirtualAddress address)
{
        return (address >= MEM_SV_INTRPT &&
                address < MEM_SV_INTRPT + HPAGE_SIZE);
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
        HV_PTE *l1_pgtable = kbt->pgtable;
        HV_PTE *l2_pgtable;
        unsigned long pfn;
        HV_PTE pte;
        struct page *page;

        if (l1_pgtable == NULL)
                return 0;       /* can't read user space in other tasks */

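        /*
         * Walk the top-level (L1) page table by hand: a huge-page
         * mapping ends the walk here, otherwise descend into the L2
         * page table for the final translation.
         */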
        pte = l1_pgtable[HV_L1_INDEX(address)];
        if (!hv_pte_get_present(pte))
                return 0;
        pfn = hv_pte_get_pfn(pte);
        if (pte_huge(pte)) {
                if (!pfn_valid(pfn)) {
                        pr_err("huge page has bad pfn %#lx\n", pfn);
                        return 0;
                }
                return hv_pte_get_readable(pte);  /* present checked above */
        }

        page = pfn_to_page(pfn);
        if (PageHighMem(page)) {
                pr_err("L2 page table not in LOWMEM (%#llx)\n",
                       HV_PFN_TO_CPA(pfn));
                return 0;
        }
        l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
        pte = l2_pgtable[HV_L2_INDEX(address)];
        return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, VirtualAddress address,
                             unsigned int size, void *vkbt)
{
        int retval;
        struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
        if (in_kernel_text(address)) {
                /* OK to read kernel code. */
        } else if (address >= PAGE_OFFSET) {
                /* We only tolerate kernel-space reads of this task's stack */
                if (!in_kernel_stack(kbt, address))
                        return false;
        } else if (!valid_address(kbt, address)) {
                return false;   /* invalid user-space address */
        }
        pagefault_disable();
        retval = __copy_from_user_inatomic(result,
                                           (void __user __force *)address,
                                           size);
        pagefault_enable();
        return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
#ifndef __tilegx__
        const char *fault = NULL;  /* happy compiler */
        char fault_buf[64];
        VirtualAddress sp = kbt->it.sp;
        struct pt_regs *p;

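        /*
         * A fault or interrupt frame leaves a C ABI save area at the
         * interrupted sp with the saved pt_regs immediately above it;
         * check that the whole region lies within the kernel stack.
         */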
        if (!in_kernel_stack(kbt, sp))
                return NULL;
        if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
                return NULL;
        p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
        if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
                fault = "syscall";
        else {
                if (kbt->verbose) {     /* else we aren't going to use it */
                        snprintf(fault_buf, sizeof(fault_buf),
                                 "interrupt %ld", p->faultnum);
                        fault = fault_buf;
                }
        }
        if (EX1_PL(p->ex1) == KERNEL_PL &&
            in_kernel_text(p->pc) &&
            in_kernel_stack(kbt, p->sp) &&
            p->sp >= sp) {
                if (kbt->verbose)
                        pr_err("  <%s while in kernel mode>\n", fault);
        } else if (EX1_PL(p->ex1) == USER_PL &&
            p->pc < PAGE_OFFSET &&
            p->sp < PAGE_OFFSET) {
                if (kbt->verbose)
                        pr_err("  <%s while in user mode>\n", fault);
        } else {
                /*
                 * Not a frame we recognize; bail out unconditionally
                 * rather than dereferencing a bogus pt_regs below.
                 */
                if (kbt->verbose)
                        pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
                               p->pc, p->sp, p->ex1);
                return NULL;
        }
        if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
#endif
        return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(VirtualAddress pc)
{
        return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt)
{
        BacktraceIterator *b = &kbt->it;

        if (is_sigreturn(b->pc)) {
                struct rt_sigframe *frame;
                unsigned long sigframe_top =
                        b->sp + sizeof(struct rt_sigframe) - 1;
                if (!valid_address(kbt, b->sp) ||
                    !valid_address(kbt, sigframe_top)) {
                        if (kbt->verbose)
                                pr_err("  (odd signal: sp %#lx?)\n",
                                       (unsigned long)(b->sp));
                        return NULL;
                }
                frame = (struct rt_sigframe *)b->sp;
                if (kbt->verbose) {
                        pr_err("  <received signal %d>\n",
                               frame->info.si_signo);
                }
                return &frame->uc.uc_mcontext.regs;
        }
        return NULL;
}

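/* Does the iterator's current pc sit on the sigreturn trampoline? */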
static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
        return is_sigreturn(kbt->it.pc);
}

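/*
 * If the current frame is a fault or signal handler frame, restart
 * the backtrace from the pt_regs saved in that frame; returns nonzero
 * if we successfully crossed into the interrupted context.
 */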
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
        struct pt_regs *p;

        p = valid_fault_handler(kbt);
        if (p == NULL)
                p = valid_sigframe(kbt);
        if (p == NULL)
                return 0;
        backtrace_init(&kbt->it, read_memory_func, kbt,
                       p->pc, p->lr, p->sp, p->regs[52]);
        kbt->new_context = 1;
        return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
        struct KBacktraceIterator *kbt)
{
        for (;;) {
                do {
                        if (!KBacktraceIterator_is_sigreturn(kbt))
                                return 1;
                } while (backtrace_next(&kbt->it));

                if (!KBacktraceIterator_restart(kbt))
                        return 0;
        }
}

/*
 * If the current sp is on a different page than the top-of-kernel-stack
 * we recorded at the last context switch, we have probably blown the
 * stack, and nothing is going to work out well.  If we can at least get
 * out a warning, that may help debugging, though we probably won't be
 * able to backtrace into the code that actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        unsigned long ksp0 = get_current_ksp0();
        unsigned long ksp0_base = ksp0 - THREAD_SIZE;
        unsigned long sp = stack_pointer;

        if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
                pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
        } else if (sp < ksp0_base + sizeof(struct thread_info)) {
                pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
        }
}

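/*
 * Set up the iterator to walk the stack of task 't' (NULL meaning the
 * current task), starting from 'regs' if they are supplied.  A typical
 * use, as in tile_show_stack() below:
 *
 *      struct KBacktraceIterator kbt;
 *      KBacktraceIterator_init(&kbt, NULL, regs);
 *      for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *              pr_err("  pc %#lx\n", kbt.it.pc);
 */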
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
                             struct task_struct *t, struct pt_regs *regs)
{
        VirtualAddress pc, lr, sp, r52;
        int is_current;

        /*
         * Set up callback information.  We grab the kernel stack base
         * so we will allow reads of that address range, and if we're
         * asking about the current process we grab the page table
         * so we can check user accesses before trying to read them.
         * We flush the TLB to avoid any weird skew issues.
         */
        is_current = (t == NULL);
        kbt->is_current = is_current;
        if (is_current)
                t = validate_current();
        kbt->task = t;
        kbt->pgtable = NULL;
        kbt->verbose = 0;   /* override in caller if desired */
        kbt->profile = 0;   /* override in caller if desired */
        kbt->end = 0;
        kbt->new_context = 0;
        if (is_current) {
                HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
                if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
                        /*
                         * Not just an optimization: this also allows
                         * this to work at all before va/pa mappings
                         * are set up.
                         */
                        kbt->pgtable = swapper_pg_dir;
                } else {
                        struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
                        if (!PageHighMem(page))
                                kbt->pgtable = __va(pgdir_pa);
                        else
                                pr_err("page table not in LOWMEM (%#llx)\n",
                                       pgdir_pa);
                }
                local_flush_tlb_all();
                validate_stack(regs);
        }

        if (regs == NULL) {
                if (is_current || t->state == TASK_RUNNING) {
                        /* Can't do this; we need registers */
                        kbt->end = 1;
                        return;
                }
                pc = get_switch_to_pc();
                lr = t->thread.pc;
                sp = t->thread.ksp;
                r52 = 0;
        } else {
                pc = regs->pc;
                lr = regs->lr;
                sp = regs->sp;
                r52 = regs->regs[52];
        }

        backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
        kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

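/* Has the iterator run off the end of the stack? */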
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
        return kbt->end;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

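/* Advance to the caller's frame, restarting across contexts as needed. */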
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
        kbt->new_context = 0;
        if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
                kbt->end = 1;
                return;
        }
        kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_next);

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
        int i;

        if (headers) {
                /*
                 * Add a blank line since if we are called from panic(),
                 * then bust_spinlocks() will have spit out a space in
                 * front of us, and it would mess up our KERN_ERR.
                 */
                pr_err("\n");
                pr_err("Starting stack dump of tid %d, pid %d (%s)"
                       " on cpu %d at cycle %lld\n",
                       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
                       smp_processor_id(), get_cycles());
        }
#ifdef __tilegx__
        if (kbt->is_current) {
                __insn_mtspr(SPR_SIM_CONTROL,
                             SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
        }
#endif
        kbt->verbose = 1;
        i = 0;
        for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
                char *modname;
                const char *name;
                unsigned long address = kbt->it.pc;
                unsigned long offset, size;
                char namebuf[KSYM_NAME_LEN+100];

                if (address >= PAGE_OFFSET)
                        name = kallsyms_lookup(address, &size, &offset,
                                               &modname, namebuf);
                else
                        name = NULL;

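                /*
                 * Format the frame as "name+offset/size [module] ",
                 * leaving namebuf empty if the symbol lookup failed.
                 */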
                if (!name) {
                        namebuf[0] = '\0';
                } else {
                        size_t namelen = strlen(namebuf);
                        size_t remaining = (sizeof(namebuf) - 1) - namelen;
                        char *p = namebuf + namelen;
                        int rc = snprintf(p, remaining, "+%#lx/%#lx ",
                                          offset, size);
                        if (modname && rc < remaining)
                                snprintf(p + rc, remaining - rc,
                                         "[%s] ", modname);
                        namebuf[sizeof(namebuf)-1] = '\0';
                }

                pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
                       i++, address, namebuf, (unsigned long)(kbt->it.sp));

                if (i >= 100) {
                        pr_err("Stack dump truncated (%d frames)\n", i);
                        break;
                }
        }
        if (headers)
                pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
        struct KBacktraceIterator kbt;
        KBacktraceIterator_init(&kbt, NULL, regs);
        tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

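/*
 * Build a minimal pt_regs holding just the four values the backtracer
 * needs to start a walk: pc, lr, sp, and r52.
 */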
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
                                       ulong pc, ulong lr, ulong sp, ulong r52)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->pc = pc;
        regs->lr = lr;
        regs->sp = sp;
        regs->regs[52] = r52;
        return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;
        dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
                                      ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;
        KBacktraceIterator_init(kbt, NULL,
                                regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
        struct KBacktraceIterator kbt;
        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
                KBacktraceIterator_init(&kbt, task, NULL);
        tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

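/* Record up to trace->max_entries kernel pc values for 'task'. */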
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
        struct KBacktraceIterator kbt;
        int skip = trace->skip;
        int i = 0;

        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
                KBacktraceIterator_init(&kbt, task, NULL);
        for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
                if (skip) {
                        --skip;
                        continue;
                }
                if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
                        break;
                trace->entries[i++] = kbt.it.pc;
        }
        trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

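/* Capture a trace of the current task's kernel stack. */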
void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);