intel_pstate: Add a few comments
author Kristen Carlson Accardi <kristen@linux.intel.com>
Wed, 10 Dec 2014 20:39:38 +0000 (12:39 -0800)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Wed, 10 Dec 2014 23:24:42 +0000 (00:24 +0100)
Add a few comments in the code which calculates busyness to
clarify parts of the algorithm.

Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
drivers/cpufreq/intel_pstate.c

index 0e841ee..742eefb 100644
@@ -199,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
 
        pid->integral += fp_error;
 
-       /* limit the integral term */
+       /*
+        * We limit the integral here so that it will never
+        * get higher than 30.  This prevents it from becoming
+        * too large an input over long periods of time and allows
+        * it to get factored out sooner.
+        *
+        * The value of 30 was chosen through experimentation.
+        */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
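
For reference, here is a minimal standalone sketch of the integral clamp from the hunk above. The fixed-point helpers (FRAC_BITS, int_tofp, fp_toint) are redefined locally so the snippet compiles on its own, and clamp_integral() is a hypothetical helper name for the example, not a function in the driver:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

/* Clamp a PID integral term to +/-30 (in fixed point), as in pid_calc(). */
static int64_t clamp_integral(int64_t integral)
{
	int64_t integral_limit = int_tofp(30);

	if (integral > integral_limit)
		integral = integral_limit;
	if (integral < -integral_limit)
		integral = -integral_limit;

	return integral;
}

int main(void)
{
	/* Pretend the accumulated error reached 45; the clamp caps it at 30. */
	int64_t integral = clamp_integral(int_tofp(45));

	printf("integral = %lld\n", (long long)fp_toint(integral));
	return 0;
}
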
@@ -616,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        if (limits.no_turbo || limits.turbo_disabled)
                max_perf = cpu->pstate.max_pstate;
 
+       /*
+        * Performance can be limited by the user through sysfs, by the
+        * cpufreq policy, or by CPU-specific default values determined
+        * through experimentation.
+        */
        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
        *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
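
As an illustration of how those limits combine, the following is a hedged, self-contained sketch of the scale-and-clamp step. The fixed-point macros and mul_fp() are local copies for the example, scaled_max_perf() is an invented helper name, and the pstate numbers in main() are made up:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y)
{
	return (x * y) >> FRAC_BITS;
}

/* Scale max_perf by a fixed-point fraction, then clamp it into [lo, hi]. */
static int scaled_max_perf(int max_perf, int64_t fraction, int lo, int hi)
{
	int adj = fp_toint(mul_fp(int_tofp(max_perf), fraction));

	if (adj < lo)
		adj = lo;
	if (adj > hi)
		adj = hi;

	return adj;
}

int main(void)
{
	/* Hypothetical limits: a sysfs max_perf_pct of 80% on a CPU whose
	 * pstates run from 8 (min) through 24 (max non-turbo) to 30 (turbo). */
	int64_t fraction = int_tofp(80) / 100;

	printf("max pstate = %d\n", scaled_max_perf(24, fraction, 8, 30));
	return 0;
}
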
@@ -717,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
        u32 duration_us;
        u32 sample_time;
 
+       /*
+        * core_busy is the ratio of actual performance to max.
+        * max_pstate is the max non-turbo pstate available.
+        * current_pstate is the pstate that was requested during
+        *      the last sample period.
+        *
+        * We normalize core_busy, which was our actual percent
+        * performance, to what we requested during the last sample
+        * period. The result is the percentage of busy at the
+        * requested pstate.
+        */
        core_busy = cpu->sample.core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
+       /*
+        * Since we have a deferred timer, it will not fire unless
+        * we are in C0.  So, determine whether the actual elapsed time
+        * is significantly (3x) greater than our sample interval.  If it
+        * is, then we were idle long enough that our busyness value
+        * should be adjusted.
+        */
        sample_time = pid_params.sample_rate_ms  * USEC_PER_MSEC;
        duration_us = (u32) ktime_us_delta(cpu->sample.time,
                                           cpu->last_sample_time);
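
Pulling the two comments above together, here is a hedged standalone sketch of the scaled-busy calculation: normalize the measured busy percentage from the requested pstate to the max non-turbo pstate, then scale it down when the elapsed time shows the CPU was idle for most of the interval. The fixed-point helpers are local copies, scaled_busy() is an invented name, the sample_time/duration scale-down factor is an assumption for the sketch (the exact adjustment is not shown in this hunk), and the numbers in main() are made up:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y)
{
	return (x * y) >> FRAC_BITS;
}

static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;
}

/*
 * Normalize core_pct_busy from the requested pstate to the max
 * non-turbo pstate, then scale it down if the elapsed time was
 * significantly (3x) longer than the sample interval, i.e. the
 * deferred timer did not fire because we were idle.
 */
static int64_t scaled_busy(int64_t core_pct_busy, int max_pstate,
			   int current_pstate, uint32_t duration_us,
			   uint32_t sample_time_us)
{
	int64_t busy = mul_fp(core_pct_busy,
			      div_fp(int_tofp(max_pstate),
				     int_tofp(current_pstate)));

	if (duration_us > 3 * sample_time_us)
		busy = mul_fp(busy, div_fp(int_tofp(sample_time_us),
					   int_tofp(duration_us)));

	return busy;
}

int main(void)
{
	/* 60% busy while running at pstate 28, max non-turbo pstate 24,
	 * with a 10 ms sample interval and 10 ms of actual elapsed time. */
	int64_t busy = scaled_busy(int_tofp(60), 24, 28, 10000, 10000);

	printf("scaled busy = %lld%%\n", (long long)fp_toint(busy));
	return 0;
}
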