diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 57b8e2c..613bc1f 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,13 +31,13 @@ int watchdog_enabled;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
-#ifdef CONFIG_PERF_EVENTS_NMI
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
@@ -51,7 +51,7 @@ static int __initdata no_watchdog;
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
  */
-#ifdef CONFIG_PERF_EVENTS_NMI
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 static int hardlockup_panic;
 
 static int __init hardlockup_panic_setup(char *str)
@@ -115,7 +115,7 @@ static unsigned long get_sample_period(void)
 /* Commands for resetting the watchdog */
 static void __touch_watchdog(void)
 {
-       int this_cpu = raw_smp_processor_id();
+       int this_cpu = smp_processor_id();
 
        __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
 }
@@ -124,6 +124,7 @@ void touch_softlockup_watchdog(void)
 {
        __get_cpu_var(watchdog_touch_ts) = 0;
 }
+EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
 {
@@ -138,6 +139,7 @@ void touch_all_softlockup_watchdogs(void)
                per_cpu(watchdog_touch_ts, cpu) = 0;
 }
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
        __get_cpu_var(watchdog_nmi_touch) = true;
@@ -145,29 +147,31 @@ void touch_nmi_watchdog(void)
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
+#endif
+
 void touch_softlockup_watchdog_sync(void)
 {
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 
-#ifdef CONFIG_PERF_EVENTS_NMI
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
-static int is_hardlockup(int cpu)
+static int is_hardlockup(void)
 {
-       unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+       unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
 
-       if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+       if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
                return 1;
 
-       per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+       __get_cpu_var(hrtimer_interrupts_saved) = hrint;
        return 0;
 }
 #endif
 
-static int is_softlockup(unsigned long touch_ts, int cpu)
+static int is_softlockup(unsigned long touch_ts)
 {
-       unsigned long now = get_timestamp(cpu);
+       unsigned long now = get_timestamp(smp_processor_id());
 
        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + softlockup_thresh))
@@ -188,7 +192,7 @@ static struct notifier_block panic_block = {
        .notifier_call = watchdog_panic,
 };
 
-#ifdef CONFIG_PERF_EVENTS_NMI
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
@@ -202,8 +206,6 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
 {
-       int this_cpu = smp_processor_id();
-
        if (__get_cpu_var(watchdog_nmi_touch) == true) {
                __get_cpu_var(watchdog_nmi_touch) = false;
                return;
@@ -215,7 +217,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
         * fired multiple times before we overflow'd.  If it hasn't
         * then this is a good indication the cpu is stuck
         */
-       if (is_hardlockup(this_cpu)) {
+       if (is_hardlockup()) {
+               int this_cpu = smp_processor_id();
+
                /* only print hardlockups once */
                if (__get_cpu_var(hard_watchdog_warn) == true)
                        return;
@@ -238,12 +242,11 @@ static void watchdog_interrupt_count(void)
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
-#endif /* CONFIG_PERF_EVENTS_NMI */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-       int this_cpu = smp_processor_id();
        unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
@@ -258,12 +261,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
        if (touch_ts == 0) {
-               if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
+               if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
-                       per_cpu(softlockup_touch_sync, this_cpu) = false;
+                       __get_cpu_var(softlockup_touch_sync) = false;
                        sched_clock_tick();
                }
                __touch_watchdog();
@@ -276,14 +279,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
-       duration = is_softlockup(touch_ts, this_cpu);
+       duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
                if (__get_cpu_var(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
-                       this_cpu, duration,
+                       smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
@@ -305,10 +308,10 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 /*
  * The watchdog thread - touches the timestamp.
  */
-static int watchdog(void *__bind_cpu)
+static int watchdog(void *unused)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-       struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);
+       struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
        sched_setscheduler(current, SCHED_FIFO, &param);
 
@@ -324,7 +327,7 @@ static int watchdog(void *__bind_cpu)
        /*
         * Run briefly once per second to reset the softlockup timestamp.
         * If this gets delayed for more than 60 seconds then the
-        * debug-printout triggers in softlockup_tick().
+        * debug-printout triggers in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
@@ -341,7 +344,7 @@ static int watchdog(void *__bind_cpu)
 }
 
 
-#ifdef CONFIG_PERF_EVENTS_NMI
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 static int watchdog_nmi_enable(int cpu)
 {
        struct perf_event_attr *wd_attr;
@@ -392,7 +395,7 @@ static void watchdog_nmi_disable(int cpu)
 #else
 static int watchdog_nmi_enable(int cpu) { return 0; }
 static void watchdog_nmi_disable(int cpu) { return; }
-#endif /* CONFIG_PERF_EVENTS_NMI */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /* prepare/enable/disable routines */
 static int watchdog_prepare_cpu(int cpu)
@@ -457,7 +460,7 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
        int cpu;
-       int result;
+       int result = 0;
 
        for_each_online_cpu(cpu)
                result += watchdog_enable(cpu);
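
Illustrative note, not part of the patch: the recurring conversion above drops the explicit cpu argument and switches per_cpu(var, cpu) to __get_cpu_var(var), which is safe because each of these functions only ever touches the copy belonging to the CPU it runs on. A minimal userspace C sketch of that pattern follows; NR_CPUS, fake_smp_processor_id() and the plain arrays are invented stand-ins for the kernel's real per-CPU machinery.

#include <stdio.h>

#define NR_CPUS 4

/* Invented stand-ins for the kernel's per-CPU data and smp_processor_id(). */
static unsigned long hrtimer_interrupts[NR_CPUS];
static unsigned long hrtimer_interrupts_saved[NR_CPUS];

static int fake_smp_processor_id(void)
{
	return 1;		/* pretend we always run on CPU 1 */
}

/*
 * Local-CPU check, mirroring the shape of the new is_hardlockup(void):
 * the function resolves the CPU itself instead of taking a cpu argument,
 * because it is only ever called on the CPU whose counters it inspects.
 */
static int is_hardlockup(void)
{
	int cpu = fake_smp_processor_id();
	unsigned long hrint = hrtimer_interrupts[cpu];

	if (hrtimer_interrupts_saved[cpu] == hrint)
		return 1;	/* timer count did not advance: looks stuck */

	hrtimer_interrupts_saved[cpu] = hrint;
	return 0;
}

int main(void)
{
	hrtimer_interrupts[1] = 1;	/* hrtimer fired once, then stalled */
	printf("first check:  %d\n", is_hardlockup());	/* prints 0 */
	printf("second check: %d\n", is_hardlockup());	/* prints 1 */
	return 0;
}

Two back-to-back checks with no intervening timer tick report a lockup, which is the same condition the NMI-driven overflow callback tests via is_hardlockup().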