Merge branch 'pm-cpufreq'
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7898de0..3af9dd7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
-#define PCT_TO_HWP(x) (x * 255 / 100)
 static void intel_pstate_hwp_set(void)
 {
-       int min, max, cpu;
-       u64 value, freq;
+       int min, hw_min, max, hw_max, cpu, range, adj_range;
+       u64 value, cap;
+
+       rdmsrl(MSR_HWP_CAPABILITIES, cap);
+       hw_min = HWP_LOWEST_PERF(cap);
+       hw_max = HWP_HIGHEST_PERF(cap);
+       range = hw_max - hw_min;
 
        get_online_cpus();
 
        for_each_online_cpu(cpu) {
                rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-               min = PCT_TO_HWP(limits.min_perf_pct);
+               adj_range = limits.min_perf_pct * range / 100;
+               min = hw_min + adj_range;
                value &= ~HWP_MIN_PERF(~0L);
                value |= HWP_MIN_PERF(min);
 
-               max = PCT_TO_HWP(limits.max_perf_pct);
+               adj_range = limits.max_perf_pct * range / 100;
+               max = hw_min + adj_range;
                if (limits.no_turbo) {
-                       rdmsrl( MSR_HWP_CAPABILITIES, freq);
-                       max = HWP_GUARANTEED_PERF(freq);
+                       hw_max = HWP_GUARANTEED_PERF(cap);
+                       if (hw_max < max)
+                               max = hw_max;
                }
 
                value &= ~HWP_MAX_PERF(~0L);
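
The hunk above replaces the fixed 0-255 scaling of PCT_TO_HWP() with a linear
mapping onto the CPU's real capability window read from MSR_HWP_CAPABILITIES,
so a percentage limit always lands inside [hw_min, hw_max]. A minimal
stand-alone sketch of that arithmetic; pct_to_hwp() and the sample capability
values (hw_min = 1, hw_max = 35) are illustrative, not taken from any real MSR
dump:

#include <stdio.h>

/* Same arithmetic as the new intel_pstate_hwp_set() mapping. */
static int pct_to_hwp(int pct, int hw_min, int hw_max)
{
        int range = hw_max - hw_min;

        return hw_min + pct * range / 100;
}

int main(void)
{
        /* 0% -> 1, 50% -> 18, 100% -> 35: always inside [1, 35].
         * The old PCT_TO_HWP(50) would have requested 127, far
         * outside that window.
         */
        printf("%d %d %d\n", pct_to_hwp(0, 1, 35),
               pct_to_hwp(50, 1, 35), pct_to_hwp(100, 1, 35));
        return 0;
}
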
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
        limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+       limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+       limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
        if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
        limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
        limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+       limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+       limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
        if (hwp_active)
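
These two sysfs store hooks are symmetric: each clamps the written value to
[0, 100], combines it with the policy limit, and then cross-clamps against the
opposite bound so that min_perf_pct <= max_perf_pct always holds afterwards. A
stand-alone illustration of the max path; min_i()/max_i() stand in for the
kernel's min()/max() and the numbers are made up:

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

int main(void)
{
        int min_policy_pct = 0, max_policy_pct = 100;
        int min_perf_pct = 80;          /* current floor */
        int max_sysfs_pct = 40;         /* user writes 40, below the floor */
        int max_perf_pct;

        max_perf_pct = min_i(max_policy_pct, max_sysfs_pct);   /* 40 */
        max_perf_pct = max_i(min_policy_pct, max_perf_pct);    /* 40 */
        max_perf_pct = max_i(min_perf_pct, max_perf_pct);      /* raised to 80 */

        printf("max_perf_pct = %d\n", max_perf_pct);
        return 0;
}

Without the two added max() lines, an inverted request (max below min) would
have been stored as-is.
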
@@ -484,12 +495,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
 }
 /************************** sysfs end ************************/
 
-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-       hwp_active++;
        pr_info("intel_pstate: HWP enabled\n");
 
-       wrmsrl( MSR_PM_ENABLE, 0x1);
+       wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
 static int byt_get_min_pstate(void)
@@ -522,7 +532,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        int32_t vid_fp;
        u32 vid;
 
-       val = pstate << 8;
+       val = (u64)pstate << 8;
        if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
@@ -611,7 +621,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
        u64 val;
 
-       val = pstate << 8;
+       val = (u64)pstate << 8;
        if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
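
Both byt_set_pstate() and core_set_pstate() get the same one-character fix:
the shift now happens in 64-bit arithmetic. With a plain int, the intermediate
pstate << 8 is a 32-bit value, and anything reaching bit 31 would be
sign-extended when widened into the u64 val. Real P-state numbers are far too
small to hit this, so the cast reads as defensive hardening; a sketch that
emulates the 32-bit intermediate with a made-up value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int pstate = 0x800000;  /* illustrative only; real P-states are tiny */

        /* What a 32-bit intermediate would hold, then sign-extend: */
        uint32_t inter = (uint32_t)pstate << 8;         /* 0x80000000 */
        uint64_t bad = (uint64_t)(int32_t)inter;        /* 0xffffffff80000000 */

        /* Shifting in 64 bits keeps the upper half clean: */
        uint64_t good = (uint64_t)pstate << 8;          /* 0x0000000080000000 */

        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}
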
@@ -909,6 +919,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x4c, byt_params),
        ICPU(0x4e, core_params),
        ICPU(0x4f, core_params),
+       ICPU(0x5e, core_params),
        ICPU(0x56, core_params),
        ICPU(0x57, knl_params),
        {}
@@ -933,6 +944,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        cpu->cpu = cpunum;
+
+       if (hwp_active)
+               intel_pstate_hwp_enable(cpu);
+
        intel_pstate_get_cpu_pstates(cpu);
 
        init_timer_deferrable(&cpu->timer);
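
Together with the final hunk below, this moves HWP enablement from a single
write at driver registration into the per-CPU init path: intel_pstate_init()
now only records that HWP is present (hwp_active++), and each CPU enables it
for itself via wrmsrl_on_cpu() when intel_pstate_init_cpu() runs, which
presumably also covers CPUs brought online later. A stand-alone sketch of that
pattern; wrmsr_on() is a printing stub for wrmsrl_on_cpu(), nothing here
touches real MSRs:

#include <stdio.h>

#define MSR_PM_ENABLE 0x770     /* architectural IA32_PM_ENABLE address */

static int hwp_active;

static void wrmsr_on(int cpu, unsigned int msr, unsigned long long val)
{
        printf("cpu%d: wrmsr %#x <- %#llx\n", cpu, msr, val);
}

static void init_cpu(int cpu)
{
        /* Each CPU enables HWP for itself, instead of one global
         * write on whichever CPU the driver init happened to run.
         */
        if (hwp_active)
                wrmsr_on(cpu, MSR_PM_ENABLE, 0x1);
}

int main(void)
{
        hwp_active = 1;                 /* driver init: feature detected */
        for (int cpu = 0; cpu < 4; cpu++)
                init_cpu(cpu);          /* per-CPU init path */
        return 0;
}
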
@@ -985,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-       limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-       limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
        limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+       /* Normalize user input to [min_policy_pct, max_policy_pct] */
+       limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+       limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+       limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+       /* Make sure min_perf_pct <= max_perf_pct */
+       limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+       limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
        if (hwp_active)
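
The policy path applies the same normalization in one place: both user
percentages are first pulled into [min_policy_pct, max_policy_pct], then min
is forced below max. A worked stand-alone example with illustrative numbers
(policy window 20-80%, user asked for min 90% / max 100% via sysfs);
min_i()/max_i() again stand in for min()/max():

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

int main(void)
{
        int min_policy_pct = 20, max_policy_pct = 80;
        int min_sysfs_pct = 90, max_sysfs_pct = 100;
        int min_perf_pct, max_perf_pct;

        /* Normalize user input to [min_policy_pct, max_policy_pct] */
        min_perf_pct = max_i(min_policy_pct, min_sysfs_pct);   /* 90 */
        min_perf_pct = min_i(max_policy_pct, min_perf_pct);    /* capped to 80 */
        max_perf_pct = min_i(max_policy_pct, max_sysfs_pct);   /* 80 */
        max_perf_pct = max_i(min_policy_pct, max_perf_pct);    /* 80 */

        /* Make sure min_perf_pct <= max_perf_pct */
        min_perf_pct = min_i(max_perf_pct, min_perf_pct);      /* 80 */

        printf("min %d%% max %d%%\n", min_perf_pct, max_perf_pct);
        return 0;
}

Both limits end up at 80%, the highest value the policy window allows.
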
@@ -1170,6 +1192,10 @@ static struct hw_vendor_info vendor_info[] = {
        {1, "ORACLE", "X4270M3 ", PPC},
        {1, "ORACLE", "X4270M2 ", PPC},
        {1, "ORACLE", "X4170M2 ", PPC},
+       {1, "ORACLE", "X4170 M3", PPC},
+       {1, "ORACLE", "X4275 M3", PPC},
+       {1, "ORACLE", "X6-2    ", PPC},
+       {1, "ORACLE", "Sudbury ", PPC},
        {0, "", ""},
 };
 
@@ -1246,7 +1272,7 @@ static int __init intel_pstate_init(void)
                return -ENOMEM;
 
        if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-               intel_pstate_hwp_enable();
+               hwp_active++;
 
        if (!hwp_active && hwp_only)
                goto out;