perf/x86/intel, watchdog: Switch NMI watchdog to ref cycles on x86
[cascardo/linux.git] / arch / x86 / kernel / apic / hw_nmi.c
1 /*
2  *  HW NMI watchdog support
3  *
4  *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
5  *
6  *  Arch specific calls to support NMI watchdog
7  *
8  *  Bits copied from original nmi.c file
9  *
10  */
11 #include <asm/apic.h>
12 #include <asm/nmi.h>
13
14 #include <linux/cpumask.h>
15 #include <linux/kdebug.h>
16 #include <linux/notifier.h>
17 #include <linux/kprobes.h>
18 #include <linux/nmi.h>
19 #include <linux/module.h>
20 #include <linux/delay.h>
21 #include <linux/perf_event.h>
22
23 #ifdef CONFIG_HARDLOCKUP_DETECTOR
24 int hw_nmi_get_event(void)
25 {
26         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
27                 return PERF_COUNT_HW_REF_CPU_CYCLES;
28         return PERF_COUNT_HW_CPU_CYCLES;
29 }
30
31 u64 hw_nmi_get_sample_period(int watchdog_thresh)
32 {
33         return (u64)(cpu_khz) * 1000 * watchdog_thresh;
34 }
35 #endif
36
37 #ifdef arch_trigger_all_cpu_backtrace
/* Send an NMI IPI to every CPU in @mask so each one dumps a backtrace. */
static void nmi_raise_cpu_backtrace(cpumask_t *mask)
{
	apic->send_IPI_mask(mask, NMI_VECTOR);
}
42
/*
 * Trigger a backtrace dump on all CPUs (optionally including the caller).
 * Delegates to the generic helper, using nmi_raise_cpu_backtrace() to
 * deliver the NMIs.
 */
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, nmi_raise_cpu_backtrace);
}
47
48 static int
49 arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
50 {
51         if (nmi_cpu_backtrace(regs))
52                 return NMI_HANDLED;
53
54         return NMI_DONE;
55 }
56 NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
57
58 static int __init register_trigger_all_cpu_backtrace(void)
59 {
60         register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
61                                 0, "arch_bt");
62         return 0;
63 }
64 early_initcall(register_trigger_all_cpu_backtrace);
65 #endif