arch/tile: adopt the new nmi_backtrace framework
[cascardo/linux.git] / arch / tile / kernel / process.c
index a465d83..9f37106 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/compat.h>
-#include <linux/hardirq.h>
+#include <linux/nmi.h>
 #include <linux/syscalls.h>
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
@@ -594,66 +594,18 @@ void show_regs(struct pt_regs *regs)
        tile_show_stack(&kbt);
 }
 
-/* To ensure stack dump on tiles occurs one by one. */
-static DEFINE_SPINLOCK(backtrace_lock);
-/* To ensure no backtrace occurs before all of the stack dump are done. */
-static atomic_t backtrace_cpus;
-/* The cpu mask to avoid reentrance. */
-static struct cpumask backtrace_mask;
-
-void do_nmi_dump_stack(struct pt_regs *regs)
-{
-       int is_idle = is_idle_task(current) && !in_interrupt();
-       int cpu;
-
-       nmi_enter();
-       cpu = smp_processor_id();
-       if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
-               goto done;
-
-       spin_lock(&backtrace_lock);
-       if (is_idle)
-               pr_info("CPU: %d idle\n", cpu);
-       else
-               show_regs(regs);
-       spin_unlock(&backtrace_lock);
-       atomic_dec(&backtrace_cpus);
-done:
-       nmi_exit();
-}
-
 #ifdef __tilegx__
-void arch_trigger_all_cpu_backtrace(bool self)
+void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
 {
        struct cpumask mask;
        HV_Coord tile;
        unsigned int timeout;
        int cpu;
-       int ongoing;
        HV_NMI_Info info[NR_CPUS];
 
-       ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
-       if (ongoing != 0) {
-               pr_err("Trying to do all-cpu backtrace.\n");
-               pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
-                      ongoing);
-               if (self) {
-                       pr_err("Reporting the stack on this cpu only.\n");
-                       dump_stack();
-               }
-               return;
-       }
-
-       cpumask_copy(&mask, cpu_online_mask);
-       cpumask_clear_cpu(smp_processor_id(), &mask);
-       cpumask_copy(&backtrace_mask, &mask);
-
-       /* Backtrace for myself first. */
-       if (self)
-               dump_stack();
-
        /* Tentatively dump stack on remote tiles via NMI. */
        timeout = 100;
+       cpumask_copy(&mask, in_mask);
        while (!cpumask_empty(&mask) && timeout) {
                for_each_cpu(cpu, &mask) {
                        tile.x = cpu_x(cpu);
@@ -664,12 +616,17 @@ void arch_trigger_all_cpu_backtrace(bool self)
                }
 
                mdelay(10);
+               touch_softlockup_watchdog();
                timeout--;
        }
 
-       /* Warn about cpus stuck in ICS and decrement their counts here. */
+       /* Warn about cpus stuck in ICS. */
        if (!cpumask_empty(&mask)) {
                for_each_cpu(cpu, &mask) {
+
+                       /* Clear the bit as if nmi_cpu_backtrace() ran. */
+                       cpumask_clear_cpu(cpu, in_mask);
+
                        switch (info[cpu].result) {
                        case HV_NMI_RESULT_FAIL_ICS:
                                pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
@@ -680,16 +637,20 @@ void arch_trigger_all_cpu_backtrace(bool self)
                                        cpu);
                                break;
                        case HV_ENOSYS:
-                               pr_warn("Hypervisor too old to allow remote stack dumps.\n");
-                               goto skip_for_each;
+                               WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
+                               break;
                        default:  /* should not happen */
                                pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
                                        cpu, info[cpu].result, info[cpu].pc);
                                break;
                        }
                }
-skip_for_each:
-               atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
        }
 }
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+       nmi_trigger_cpumask_backtrace(mask, exclude_self,
+                                     nmi_raise_cpu_backtrace);
+}
 #endif /* __tilegx_ */