2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/hardirq.h>
9 #include <linux/kdebug.h>
10 #include <linux/export.h>
11 #include <linux/ptrace.h>
12 #include <linux/kexec.h>
13 #include <linux/sysfs.h>
14 #include <linux/bug.h>
15 #include <linux/nmi.h>
17 #include <asm/stacktrace.h>
/*
 * Printable names for the per-CPU IST exception stacks, indexed by
 * (IST index - 1) so they line up with the ist[] array consulted in
 * in_exception_stack() below.
 */
19 static char *exception_stack_names[N_EXCEPTION_STACKS] = {
20 [ DOUBLEFAULT_STACK-1 ] = "#DF",
21 [ NMI_STACK-1 ] = "NMI",
22 [ DEBUG_STACK-1 ] = "#DB",
23 [ MCE_STACK-1 ] = "#MC",
/*
 * Byte size of each IST exception stack.  Every stack defaults to
 * EXCEPTION_STKSZ; the #DB stack is sized DEBUG_STKSZ instead --
 * presumably larger to accommodate nested debug exceptions; confirm
 * against the IST setup code.
 */
26 static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
27 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
28 [DEBUG_STACK - 1] = DEBUG_STKSZ
/*
 * stack_type_str - return printable begin/end marker strings for a stack
 * type, for use in dump headers/footers.  For the IST exception stacks
 * the name is looked up in exception_stack_names[].  The BUILD_BUG_ON
 * keeps this function in sync if another IST stack is ever added.
 * NOTE(review): only a fragment of this function's switch statement is
 * visible in this chunk.
 */
31 void stack_type_str(enum stack_type type, const char **begin, const char **end)
33 BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
40 case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
41 *begin = exception_stack_names[type - STACK_TYPE_EXCEPTION];
/*
 * in_exception_stack - test whether @stack points into one of this CPU's
 * IST exception stacks.  On a hit, fill @info with the stack type
 * (STACK_TYPE_EXCEPTION + index) and next_sp, the saved stack pointer of
 * whatever stack was in use before the exception, read from the pt_regs
 * frame at the very top of the IST stack.
 * NOTE(review): interior lines of this function are not visible here.
 */
50 static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
52 unsigned long *begin, *end;
56 BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
58 for (k = 0; k < N_EXCEPTION_STACKS; k++) {
/* ist[k] is the top-of-stack (exclusive end) of the k'th IST stack */
59 end = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
60 begin = end - (exception_stack_sizes[k] / sizeof(long));
/* the pt_regs frame sits at the top of the exception stack */
61 regs = (struct pt_regs *)end - 1;
63 if (stack < begin || stack >= end)
66 info->type = STACK_TYPE_EXCEPTION + k;
69 info->next_sp = (unsigned long *)regs->sp;
/*
 * in_irq_stack - test whether @stack points into this CPU's hardware IRQ
 * stack.  On a hit, set info->type and next_sp, the previous stack
 * pointer, which the entry code pushed as the first word on the IRQ
 * stack (read back from *(end - 1)).
 * NOTE(review): interior lines of this function are not visible here.
 */
77 static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
79 unsigned long *end = (unsigned long *)this_cpu_read(irq_stack_ptr);
80 unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
83 * This is a software stack, so 'end' can be a valid stack pointer.
84 * It just means the stack is empty.
/* note: '> end' (not '>= end') -- an empty stack still counts as on-stack */
86 if (stack < begin || stack > end)
89 info->type = STACK_TYPE_IRQ;
94 * The next stack pointer is the first thing pushed by the entry code
95 * after switching to the irq stack.
97 info->next_sp = (unsigned long *)*(end - 1);
/*
 * get_stack_info - classify which kernel stack contains @stack for
 * @task (NULL means current) and fill in @info.  Tries the task stack,
 * then the IST exception stacks, then the per-CPU IRQ stack.
 * @visit_mask is a bitmask of stack types already traversed by the
 * caller's unwind; seeing the same type twice means the stack chain is
 * corrupt, so classification falls through to STACK_TYPE_UNKNOWN
 * instead of looping forever.
 * NOTE(review): interior lines (labels, return paths) are not visible
 * in this chunk.
 */
102 int get_stack_info(unsigned long *stack, struct task_struct *task,
103 struct stack_info *info, unsigned long *visit_mask)
108 task = task ? : current;
110 if (in_task_stack(stack, task, info))
111 goto recursion_check;
116 if (in_exception_stack(stack, info))
117 goto recursion_check;
119 if (in_irq_stack(stack, info))
120 goto recursion_check;
126 * Make sure we don't iterate through any given stack more than once.
127 * If it comes up a second time then there's something wrong going on:
128 * just break out and report an unknown stack type.
131 if (*visit_mask & (1UL << info->type))
133 *visit_mask |= 1UL << info->type;
139 info->type = STACK_TYPE_UNKNOWN;
144 * x86-64 can have up to three kernel stacks:
145 * process stack
146 * interrupt stack
147 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
/*
 * dump_trace - walk the call chain for @task/@regs, following next_sp
 * links through any nested exception/IRQ stacks down to the task stack.
 * Frames are reported through the @ops callbacks: ->stack() at stack
 * boundaries (with a name string) and ->walk_stack() for the frames
 * themselves, each receiving the caller's @data cookie.  NULL @task,
 * @stack or @bp default to the current task's values.
 * NOTE(review): interior lines (the surrounding loop/switch scaffolding)
 * are not visible in this chunk.
 */
150 void dump_trace(struct task_struct *task, struct pt_regs *regs,
151 unsigned long *stack, unsigned long bp,
152 const struct stacktrace_ops *ops, void *data)
/* visit_mask guards get_stack_info() against stack-chain loops */
154 unsigned long visit_mask = 0;
155 struct stack_info info;
159 task = task ? : current;
160 stack = stack ? : get_stack_pointer(task, regs);
161 bp = bp ? : (unsigned long)get_frame_pointer(task, regs);
164 * Print function call entries in all stacks, starting at the
165 * current stack address. If the stacks consist of nested
169 const char *begin_str, *end_str;
171 get_stack_info(stack, task, &info, &visit_mask);
173 /* Default finish unless specified to continue */
178 /* Break out early if we are on the thread stack */
179 case STACK_TYPE_TASK:
183 case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
/* bracket the exception-stack frames with named begin/end markers */
185 stack_type_str(info.type, &begin_str, &end_str);
187 if (ops->stack(data, begin_str) < 0)
190 bp = ops->walk_stack(task, stack, bp, ops,
191 data, &info, &graph);
193 ops->stack(data, end_str);
/* hop to the stack that was in use before this one */
195 stack = info.next_sp;
200 ops->stack(data, "UNK");
206 * This handles the process stack:
208 bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
210 EXPORT_SYMBOL(dump_trace);
/*
 * show_stack_log_lvl - print a raw hex dump of the stack words starting
 * at @sp (NULL means @task's current stack pointer), at printk level
 * @log_lvl, followed by the symbolic backtrace.  If the dump runs off
 * the end of the per-CPU IRQ stack it hops back to the interrupted
 * stack via the pointer saved at irq_stack_end[-1].  Words are read
 * with probe_kernel_address() so a bad pointer cannot fault the dump.
 * Holds a task-stack reference (try_get_task_stack/put_task_stack) so
 * the stack cannot be freed mid-dump.
 * NOTE(review): interior lines of this function are not visible here.
 */
212 void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
213 unsigned long *sp, char *log_lvl)
215 unsigned long *irq_stack_end;
216 unsigned long *irq_stack;
217 unsigned long *stack;
220 if (!try_get_task_stack(task))
223 irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
224 irq_stack = irq_stack_end - (IRQ_STACK_SIZE / sizeof(long));
226 sp = sp ? : get_stack_pointer(task, regs);
/* bounded dump: at most kstack_depth_to_print words */
229 for (i = 0; i < kstack_depth_to_print; i++) {
232 if (stack >= irq_stack && stack <= irq_stack_end) {
233 if (stack == irq_stack_end) {
/* end of IRQ stack: continue on the stack it interrupted */
234 stack = (unsigned long *) (irq_stack_end[-1]);
238 if (kstack_end(stack))
242 if (probe_kernel_address(stack, word))
/* start a new output line every STACKSLOTS_PER_LINE words */
245 if ((i % STACKSLOTS_PER_LINE) == 0) {
248 printk("%s %016lx", log_lvl, word);
250 pr_cont(" %016lx", word);
/* long dumps must not trip the NMI watchdog */
253 touch_nmi_watchdog();
257 show_trace_log_lvl(task, regs, sp, log_lvl);
259 put_task_stack(task);
/*
 * show_regs - print the register state in @regs, and for kernel-mode
 * faults also dump the stack plus up to code_bytes of code surrounding
 * regs->ip.  code_prologue (code_bytes * 43/64) bytes are shown before
 * IP; bytes are fetched with probe_kernel_address() so an unmapped IP
 * produces "Bad RIP value." instead of a recursive fault, and the byte
 * at IP itself is highlighted as <xx>.
 * NOTE(review): interior lines of this function are not visible here.
 */
262 void show_regs(struct pt_regs *regs)
266 show_regs_print_info(KERN_DEFAULT);
267 __show_regs(regs, 1);
270 * When in-kernel, we also print out the stack and code at the
271 * time of the fault..
273 if (!user_mode(regs)) {
274 unsigned int code_prologue = code_bytes * 43 / 64;
275 unsigned int code_len = code_bytes;
279 printk(KERN_DEFAULT "Stack:\n");
280 show_stack_log_lvl(current, regs, NULL, KERN_DEFAULT);
282 printk(KERN_DEFAULT "Code: ");
284 ip = (u8 *)regs->ip - code_prologue;
/* if the prologue start is unreadable, retry from IP itself */
285 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
286 /* try starting at IP */
288 code_len = code_len - code_prologue + 1;
290 for (i = 0; i < code_len; i++, ip++) {
291 if (ip < (u8 *)PAGE_OFFSET ||
292 probe_kernel_address(ip, c)) {
293 pr_cont(" Bad RIP value.");
296 if (ip == (u8 *)regs->ip)
297 pr_cont("<%02x> ", c);
/*
 * is_valid_bugaddr - report whether the two bytes at @ip are the ud2
 * instruction (opcode 0x0f 0x0b; 0x0b0f when read as a little-endian
 * 16-bit word), i.e. a BUG() site.  Returns false if the bytes cannot
 * be read.  NOTE(review): the read uses __copy_from_user() on what is a
 * kernel text address -- presumably legal in this context (e.g. under a
 * widened address limit); confirm against the callers.
 */
305 int is_valid_bugaddr(unsigned long ip)
309 if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
312 return ud2 == 0x0b0f;