diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d5097c4..4042064 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -139,7 +139,6 @@ static void tick_nohz_update_jiffies(ktime_t now)
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;
 
-       cpumask_clear_cpu(cpu, nohz_cpu_mask);
        ts->idle_waketime = now;
 
        local_irq_save(flags);
@@ -159,9 +158,10 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
        if (ts->idle_active) {
                delta = ktime_sub(now, ts->idle_entrytime);
-               ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
                if (nr_iowait_cpu(cpu) > 0)
                        ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
+               else
+                       ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
                ts->idle_entrytime = now;
        }
 
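The hunk above makes the two buckets mutually exclusive: a finished sleep interval is now credited either to iowait_sleeptime (when tasks were blocked on I/O) or to idle_sleeptime, no longer to both. A minimal user-space model of the new rule; the struct, the function and the numbers are purely illustrative, not kernel API:

#include <stdio.h>

struct cpu_stats {
	unsigned long long idle_us;	/* pure idle time */
	unsigned long long iowait_us;	/* idle time spent waiting on I/O */
};

/* Credit one finished idle interval to exactly one bucket. */
static void account_interval(struct cpu_stats *s,
			     unsigned long long delta_us, int nr_iowait)
{
	if (nr_iowait > 0)
		s->iowait_us += delta_us;
	else
		s->idle_us += delta_us;	/* before the patch, idle also got the iowait delta */
}

int main(void)
{
	struct cpu_stats s = { 0, 0 };

	account_interval(&s, 1000, 0);	/* 1 ms of pure idle           */
	account_interval(&s, 2000, 3);	/* 2 ms with 3 tasks in iowait */

	/* prints: idle=1000us iowait=2000us -- the buckets no longer overlap */
	printf("idle=%lluus iowait=%lluus\n", s.idle_us, s.iowait_us);
	return 0;
}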
@@ -197,11 +197,11 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 /**
  * get_cpu_idle_time_us - get the total idle time of a cpu
  * @cpu: CPU number to query
- * @last_update_time: variable to store update time in
+ * @last_update_time: variable to store update time in. Do not update
+ * counters if NULL.
  *
  * Return the cummulative idle time (since boot) for a given
- * CPU, in microseconds. The idle time returned includes
- * the iowait time (unlike what "top" and co report).
+ * CPU, in microseconds.
  *
  * This time is measured via accounting rather than sampling,
  * and is as accurate as ktime_get() is.
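With the sentence about iowait inclusion dropped from the kernel-doc, the idle figure no longer contains iowait time, so a caller that wants the old combined value has to add the iowait getter (adjusted the same way further down) itself. A sketch of such a helper under the post-patch semantics; the name get_cpu_nonbusy_time_us is hypothetical and not part of this patch:

/*
 * Hypothetical helper reproducing the pre-patch semantics, where the
 * idle figure already included iowait.  Passing NULL keeps both calls
 * read-only: the target CPU's counters are not updated.
 */
static u64 get_cpu_nonbusy_time_us(int cpu)
{
	u64 idle = get_cpu_idle_time_us(cpu, NULL);
	u64 iowait = get_cpu_iowait_time_us(cpu, NULL);

	if (idle == (u64)-1 || iowait == (u64)-1)
		return (u64)-1;		/* NOHZ is disabled */

	return idle + iowait;
}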
@@ -211,20 +211,35 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       ktime_t now, idle;
 
        if (!tick_nohz_enabled)
                return -1;
 
-       update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
+       now = ktime_get();
+       if (last_update_time) {
+               update_ts_time_stats(cpu, ts, now, last_update_time);
+               idle = ts->idle_sleeptime;
+       } else {
+               if (ts->idle_active && !nr_iowait_cpu(cpu)) {
+                       ktime_t delta = ktime_sub(now, ts->idle_entrytime);
+
+                       idle = ktime_add(ts->idle_sleeptime, delta);
+               } else {
+                       idle = ts->idle_sleeptime;
+               }
+       }
+
+       return ktime_to_us(idle);
 
-       return ktime_to_us(ts->idle_sleeptime);
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
-/*
+/**
  * get_cpu_iowait_time_us - get the total iowait time of a cpu
  * @cpu: CPU number to query
- * @last_update_time: variable to store update time in
+ * @last_update_time: variable to store update time in. Do not update
+ * counters if NULL.
  *
  * Return the cummulative iowait time (since boot) for a given
  * CPU, in microseconds.
@@ -237,13 +252,26 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 {
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       ktime_t now, iowait;
 
        if (!tick_nohz_enabled)
                return -1;
 
-       update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
+       now = ktime_get();
+       if (last_update_time) {
+               update_ts_time_stats(cpu, ts, now, last_update_time);
+               iowait = ts->iowait_sleeptime;
+       } else {
+               if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
+                       ktime_t delta = ktime_sub(now, ts->idle_entrytime);
+
+                       iowait = ktime_add(ts->iowait_sleeptime, delta);
+               } else {
+                       iowait = ts->iowait_sleeptime;
+               }
+       }
 
-       return ktime_to_us(ts->iowait_sleeptime);
+       return ktime_to_us(iowait);
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
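Both getters now write back into the per-CPU tick_sched fields only when the caller passes a non-NULL last_update_time; with NULL they fold the still-running idle period into the returned value without touching the counters. A sketch of a sampling path in the style of a cpufreq governor; sample_cpu_times, the variable names and the pr_debug are illustrative, only the two exported getters are real:

static void sample_cpu_times(int cpu)
{
	u64 wall, idle_us, iowait_us;

	/*
	 * Updating call: flushes the currently running idle period into
	 * the counters and stores the time of that update in 'wall'.
	 */
	idle_us = get_cpu_idle_time_us(cpu, &wall);
	if (idle_us == (u64)-1)
		return;		/* NOHZ disabled, fall back to tick-based stats */

	/*
	 * Read-only call: leaves the counters untouched, so it is safe
	 * for a pure reader such as a /proc interface.
	 */
	iowait_us = get_cpu_iowait_time_us(cpu, NULL);

	pr_debug("cpu%d: wall=%llu idle=%llu iowait=%llu (us)\n",
		 cpu, (unsigned long long)wall,
		 (unsigned long long)idle_us,
		 (unsigned long long)iowait_us);
}

Before this change both calls went through update_ts_time_stats() unconditionally, writing ts->idle_entrytime and the sleeptime fields even when invoked for a remote CPU; the read-only NULL form avoids those cross-CPU writes.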
@@ -389,9 +417,6 @@ void tick_nohz_stop_sched_tick(int inidle)
                else
                        expires.tv64 = KTIME_MAX;
 
-               if (delta_jiffies > 1)
-                       cpumask_set_cpu(cpu, nohz_cpu_mask);
-
                /* Skip reprogram of event if its not changed */
                if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
                        goto out;
@@ -441,7 +466,6 @@ void tick_nohz_stop_sched_tick(int inidle)
                 * softirq.
                 */
                tick_do_update_jiffies64(ktime_get());
-               cpumask_clear_cpu(cpu, nohz_cpu_mask);
        }
        raise_softirq_irqoff(TIMER_SOFTIRQ);
 out:
@@ -524,7 +548,6 @@ void tick_nohz_restart_sched_tick(void)
        /* Update jiffies first */
        select_nohz_load_balancer(0);
        tick_do_update_jiffies64(now);
-       cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
@@ -640,8 +663,6 @@ static void tick_nohz_switch_to_nohz(void)
                next = ktime_add(next, tick_period);
        }
        local_irq_enable();
-
-       printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -793,10 +814,8 @@ void tick_setup_sched_timer(void)
        }
 
 #ifdef CONFIG_NO_HZ
-       if (tick_nohz_enabled) {
+       if (tick_nohz_enabled)
                ts->nohz_mode = NOHZ_MODE_HIGHRES;
-               printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
-       }
 #endif
 }
 #endif /* HIGH_RES_TIMERS */