Merge branch 'master' of git://1984.lsi.us.es/nf
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 365c8d9..1bdfd87 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -401,7 +401,8 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-       smp_cross_call = fn;
+       if (!smp_cross_call)
+               smp_cross_call = fn;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
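
Note: the guard above turns set_smp_cross_call() into a register-once interface: the first caller installs the IPI trigger and later calls are ignored rather than overwriting it. A minimal sketch of what that means for platform code follows; my_raise_ipi and my_platform_smp_prepare are hypothetical names for illustration, not functions from this file.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/smp.h>

/* Hypothetical cross-call trigger, e.g. backed by an interrupt
 * controller's software-generated interrupts. */
static void my_raise_ipi(const struct cpumask *mask, unsigned int ipi)
{
	/* raise the given IPI on every CPU in @mask */
}

static void __init my_platform_smp_prepare(void)
{
	set_smp_cross_call(my_raise_ipi);	/* first call: handler installed */
	set_smp_cross_call(my_raise_ipi);	/* later calls: silently ignored */
}
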
@@ -460,19 +461,11 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-       struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
        smp_cross_call(mask, IPI_TIMER);
 }
-#else
-#define smp_timer_broadcast    NULL
 #endif
 
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
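
Note: the driver-private smp_timer_broadcast() is gone; under CONFIG_GENERIC_CLOCKEVENTS_BROADCAST, tick_broadcast() is now ARM's implementation of the hook the generic clockevents broadcast code invokes when CPUs with stopped local timers need a tick, and it simply forwards the mask as an IPI_TIMER cross-call. A hedged sketch of the send side, assuming tick_broadcast() is visible via <linux/clockchips.h>; kick_sleeping_cpus_sketch is illustrative, not the generic core's code.

#include <linux/cpumask.h>
#include <linux/clockchips.h>

/* Illustrative only: kick every CPU in @pending via the arch hook above,
 * which ends up in smp_cross_call(pending, IPI_TIMER). */
static void kick_sleeping_cpus_sketch(const struct cpumask *pending)
{
	if (!cpumask_empty(pending))
		tick_broadcast(pending);
}
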
@@ -515,7 +508,6 @@ static void __cpuinit percpu_timer_setup(void)
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
        evt->cpumask = cpumask_of(cpu);
-       evt->broadcast = smp_timer_broadcast;
 
        if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
@@ -581,11 +573,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        case IPI_WAKEUP:
                break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
-               ipi_timer();
+               tick_receive_broadcast();
                irq_exit();
                break;
+#endif
 
        case IPI_RESCHEDULE:
                scheduler_ipi();
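
Note: on the receive side, the deleted ipi_timer() open-coded "run this CPU's clockevent handler" against the ARM-private percpu_clockevent; the IPI_TIMER case now defers to the generic tick_receive_broadcast() instead, and is compiled out entirely when broadcast support is not configured. A conceptual sketch of what that receive step amounts to (an assumption about the shape of the generic helper, not its code verbatim):

#include <linux/clockchips.h>
#include <linux/errno.h>

/* Conceptual sketch: deliver a broadcast tick by running the handler of
 * the clock_event_device that currently drives this CPU's tick. */
static int receive_broadcast_tick_sketch(struct clock_event_device *evt)
{
	if (!evt || !evt->event_handler)
		return -ENODEV;

	evt->event_handler(evt);
	return 0;
}
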
@@ -678,6 +672,9 @@ static int cpufreq_callback(struct notifier_block *nb,
        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;
 
+       if (arm_delay_ops.const_clock)
+               return NOTIFY_OK;
+
        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
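
Note: the added early return leaves loops_per_jiffy alone when ARM's delay implementation is backed by a fixed-rate timer (arm_delay_ops.const_clock), since udelay() then no longer scales with CPU frequency; the existing CPUFREQ_CONST_LOOPS check covers the analogous hardware case. A hedged sketch of the combined condition; lpj_needs_rescale is a hypothetical helper, not code from this file.

#include <linux/types.h>
#include <linux/cpufreq.h>
#include <asm/delay.h>

/* Hypothetical helper: loops_per_jiffy only needs rescaling on a frequency
 * transition when delays are actually derived from the CPU clock. */
static bool lpj_needs_rescale(const struct cpufreq_freqs *freq)
{
	if (freq->flags & CPUFREQ_CONST_LOOPS)	/* hw keeps delay loops constant */
		return false;
	if (arm_delay_ops.const_clock)		/* udelay() is timer-backed */
		return false;
	return true;
}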