Merge branches 'pm-cpufreq' and 'pm-cpuidle'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 29 Dec 2014 20:23:41 +0000 (21:23 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 29 Dec 2014 20:23:41 +0000 (21:23 +0100)
* pm-cpufreq:
  cpufreq: fix a NULL pointer dereference in __cpufreq_governor()
  cpufreq-dt: defer probing if OPP table is not ready

* pm-cpuidle:
  cpuidle / ACPI: remove unused CPUIDLE_FLAG_TIME_INVALID
  cpuidle: ladder: Better idle duration measurement without using CPUIDLE_FLAG_TIME_INVALID
  cpuidle: menu: Better idle duration measurement without using CPUIDLE_FLAG_TIME_INVALID

drivers/acpi/processor_idle.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpuidle/governors/ladder.c
drivers/cpuidle/governors/menu.c
include/linux/cpuidle.h

index 4995365..87b704e 100644 (file)
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
                state->flags = 0;
                switch (cx->type) {
                        case ACPI_STATE_C1:
-                       if (cx->entry_method != ACPI_CSTATE_FFH)
-                               state->flags |= CPUIDLE_FLAG_TIME_INVALID;
 
                        state->enter = acpi_idle_enter_c1;
                        state->enter_dead = acpi_idle_play_dead;
index f56147a..fde97d6 100644 (file)
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        /* OPPs might be populated at runtime, don't check for error here */
        of_init_opp_table(cpu_dev);
 
+       /*
+        * But we need OPP table to function so if it is not there let's
+        * give platform code chance to provide it for us.
+        */
+       ret = dev_pm_opp_get_opp_count(cpu_dev);
+       if (ret <= 0) {
+               pr_debug("OPP table is not ready, deferring probe\n");
+               ret = -EPROBE_DEFER;
+               goto out_free_opp;
+       }
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
index a09a29c..46bed4f 100644 (file)
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
+       /*
+        * Governor might not be initiated here if ACPI _PPC changed
+        * notification happened, so check it.
+        */
+       if (!policy->governor)
+               return -EINVAL;
 
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
index 37263d9..401c010 100644 (file)
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        last_state = &ldev->states[last_idx];
 
-       if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
-               last_residency = cpuidle_get_last_residency(dev) - \
-                                        drv->states[last_idx].exit_latency;
-       }
-       else
-               last_residency = last_state->threshold.promotion_time + 1;
+       last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
 
        /* consider promotion */
        if (last_idx < drv->state_count - 1 &&
index 659d7b0..4058079 100644 (file)
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
-        * we are basically lost in the dark how much time passed.
-        * As a compromise, assume we slept for the whole expected time.
+        * we use them anyway if they are short, and if long,
+        * truncate to the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */
-       if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
-               /* Use timer value as is */
-               measured_us = data->next_timer_us;
 
-       } else {
-               /* Use measured value */
-               measured_us = cpuidle_get_last_residency(dev);
+       /* measured value */
+       measured_us = cpuidle_get_last_residency(dev);
 
-               /* Deduct exit latency */
-               if (measured_us > target->exit_latency)
-                       measured_us -= target->exit_latency;
+       /* Deduct exit latency */
+       if (measured_us > target->exit_latency)
+               measured_us -= target->exit_latency;
 
-               /* Make sure our coefficients do not exceed unity */
-               if (measured_us > data->next_timer_us)
-                       measured_us = data->next_timer_us;
-       }
+       /* Make sure our coefficients do not exceed unity */
+       if (measured_us > data->next_timer_us)
+               measured_us = data->next_timer_us;
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
index a07e087..ab70f3b 100644 (file)
@@ -53,7 +53,6 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID      (0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_COUPLED   (0x02) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
 
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 /**
  * cpuidle_get_last_residency - retrieves the last state's residency time
  * @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
  */
 static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 {