Merge branches 'pm-sleep', 'pm-cpufreq', 'pm-core' and 'pm-opp'
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1fa1a32..be9eade 100644
@@ -35,6 +35,7 @@
 #include <asm/msr.h>
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
+#include <asm/intel-family.h>
 
 #define ATOM_RATIOS            0x66a
 #define ATOM_VIDS              0x66b
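
Note: the new <asm/intel-family.h> include provides symbolic INTEL_FAM6_* model-number macros; the CPU match tables further down switch from raw hex model IDs to these names. Purely for orientation, the values can be read off the table change below (shown here as a sketch, not copied from the header):

	#define INTEL_FAM6_SANDYBRIDGE		0x2A	/* was ICPU(0x2a, core_params) */
	#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E	/* was ICPU(0x5e, core_params) */
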
@@ -96,7 +97,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
  *                     read from MPERF MSR between last and current sample
  * @tsc:               Difference of time stamp counter between last and
  *                     current sample
- * @freq:              Effective frequency calculated from APERF/MPERF
  * @time:              Current time from scheduler
  *
  * This structure is used in the cpudata structure to store performance sample
@@ -108,7 +108,6 @@ struct sample {
        u64 aperf;
        u64 mperf;
        u64 tsc;
-       int freq;
        u64 time;
 };
 
@@ -281,9 +280,9 @@ struct cpu_defaults {
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
 
-static struct pstate_adjust_policy pid_params;
-static struct pstate_funcs pstate_funcs;
-static int hwp_active;
+static struct pstate_adjust_policy pid_params __read_mostly;
+static struct pstate_funcs pstate_funcs __read_mostly;
+static int hwp_active __read_mostly;
 
 #ifdef CONFIG_ACPI
 static bool acpi_ppc;
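
Note: __read_mostly is a data-placement hint only; it groups these globals with other rarely written data so they do not share cache lines with frequently updated variables. That matters here because pid_params and pstate_funcs are read in the scheduler-driven utilization update path. No functional change.
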
@@ -807,7 +806,8 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
        /* First disable HWP notification interrupt as we don't process them */
-       wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+       if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+               wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
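
Note: the HWP interrupt MSR is only architecturally defined when the CPU reports HWP notification support, so clearing it is now gated on that feature bit. A minimal sketch of the same guard using the ordinary runtime test instead of the patched-in static_cpu_has (the cpu variable is hypothetical):

	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpu, MSR_HWP_INTERRUPT, 0x00);
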
@@ -944,7 +944,7 @@ static int core_get_max_pstate(void)
                        if (err)
                                goto skip_tar;
 
-                       tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+                       tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
                        err = rdmsrl_safe(tdp_msr, &tdp_ratio);
                        if (err)
                                goto skip_tar;
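
Note: only the low bits of MSR_CONFIG_TDP_CONTROL select the active config-TDP level, so masking with 0x3 keeps the computed register inside the CONFIG_TDP block even when other control bits (such as the lock bit) are set. Assuming the consecutive MSR layout in msr-index.h, the masked value selects:

	/* tdp_ctrl & 0x3 -> register supplying the TDP ratio:
	 *   0: MSR_CONFIG_TDP_NOMINAL
	 *   1: MSR_CONFIG_TDP_LEVEL_1
	 *   2: MSR_CONFIG_TDP_LEVEL_2
	 */
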
@@ -972,7 +972,7 @@ static int core_get_turbo_pstate(void)
        u64 value;
        int nont, ret;
 
-       rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+       rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (value) & 255;
        if (ret <= nont)
@@ -1001,7 +1001,7 @@ static int knl_get_turbo_pstate(void)
        u64 value;
        int nont, ret;
 
-       rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+       rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (((value) >> 8) & 0xFF);
        if (ret <= nont)
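
Note: MSR_NHM_TURBO_RATIO_LIMIT and MSR_TURBO_RATIO_LIMIT resolve to the same register in msr-index.h; the two hunks simply drop the Nehalem-specific alias. Summarizing the decoding done by the surrounding code (sketch only, local names hypothetical):

	u64 value;
	int core_turbo, knl_turbo;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	core_turbo = value & 0xff;		/* bits 7:0,  core_get_turbo_pstate() */
	knl_turbo = (value >> 8) & 0xff;	/* bits 15:8, knl_get_turbo_pstate()  */
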
@@ -1091,6 +1091,26 @@ static struct cpu_defaults knl_params = {
        },
 };
 
+static struct cpu_defaults bxt_params = {
+       .pid_policy = {
+               .sample_rate_ms = 10,
+               .deadband = 0,
+               .setpoint = 60,
+               .p_gain_pct = 14,
+               .d_gain_pct = 0,
+               .i_gain_pct = 4,
+       },
+       .funcs = {
+               .get_max = core_get_max_pstate,
+               .get_max_physical = core_get_max_pstate_physical,
+               .get_min = core_get_min_pstate,
+               .get_turbo = core_get_turbo_pstate,
+               .get_scaling = core_get_scaling,
+               .get_val = core_get_val,
+               .get_target_pstate = get_target_pstate_use_cpu_load,
+       },
+};
+
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
        int max_perf = cpu->pstate.turbo_pstate;
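
Note: the new bxt_params set backs the INTEL_FAM6_ATOM_GOLDMONT (Broxton) entry added to the match table below; it reuses the Core get_* callbacks but selects get_target_pstate_use_cpu_load, the load-based target-selection algorithm, rather than the PID/performance-based one used by the big-core parameter sets.
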
@@ -1113,17 +1133,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
-{
-       trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
-       cpu->pstate.current_pstate = pstate;
-}
-
 static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 {
        int pstate = cpu->pstate.min_pstate;
 
-       intel_pstate_record_pstate(cpu, pstate);
+       trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+       cpu->pstate.current_pstate = pstate;
        /*
         * Generally, there is no guarantee that this code will always run on
         * the CPU being updated, so force the register update to run on the
@@ -1283,10 +1298,11 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
 
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
        pstate = clamp_t(int, pstate, min_perf, max_perf);
+       trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        if (pstate == cpu->pstate.current_pstate)
                return;
 
-       intel_pstate_record_pstate(cpu, pstate);
+       cpu->pstate.current_pstate = pstate;
        wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
 }
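
Note: with intel_pstate_record_pstate() folded into its two callers, the cpu_frequency trace point in intel_pstate_update_pstate() now fires for every clamped target, while the MSR_IA32_PERF_CTL write is still skipped when the target equals the current P-state; intel_pstate_set_min_pstate() keeps the trace-then-write ordering it had before.
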
 
@@ -1334,29 +1350,32 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
                        (unsigned long)&policy }
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-       ICPU(0x2a, core_params),
-       ICPU(0x2d, core_params),
-       ICPU(0x37, silvermont_params),
-       ICPU(0x3a, core_params),
-       ICPU(0x3c, core_params),
-       ICPU(0x3d, core_params),
-       ICPU(0x3e, core_params),
-       ICPU(0x3f, core_params),
-       ICPU(0x45, core_params),
-       ICPU(0x46, core_params),
-       ICPU(0x47, core_params),
-       ICPU(0x4c, airmont_params),
-       ICPU(0x4e, core_params),
-       ICPU(0x4f, core_params),
-       ICPU(0x5e, core_params),
-       ICPU(0x56, core_params),
-       ICPU(0x57, knl_params),
+       ICPU(INTEL_FAM6_SANDYBRIDGE,            core_params),
+       ICPU(INTEL_FAM6_SANDYBRIDGE_X,          core_params),
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT1,       silvermont_params),
+       ICPU(INTEL_FAM6_IVYBRIDGE,              core_params),
+       ICPU(INTEL_FAM6_HASWELL_CORE,           core_params),
+       ICPU(INTEL_FAM6_BROADWELL_CORE,         core_params),
+       ICPU(INTEL_FAM6_IVYBRIDGE_X,            core_params),
+       ICPU(INTEL_FAM6_HASWELL_X,              core_params),
+       ICPU(INTEL_FAM6_HASWELL_ULT,            core_params),
+       ICPU(INTEL_FAM6_HASWELL_GT3E,           core_params),
+       ICPU(INTEL_FAM6_BROADWELL_GT3E,         core_params),
+       ICPU(INTEL_FAM6_ATOM_AIRMONT,           airmont_params),
+       ICPU(INTEL_FAM6_SKYLAKE_MOBILE,         core_params),
+       ICPU(INTEL_FAM6_BROADWELL_X,            core_params),
+       ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,        core_params),
+       ICPU(INTEL_FAM6_BROADWELL_XEON_D,       core_params),
+       ICPU(INTEL_FAM6_XEON_PHI_KNL,           knl_params),
+       ICPU(INTEL_FAM6_ATOM_GOLDMONT,          bxt_params),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
-static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
-       ICPU(0x56, core_params),
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+       ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+       ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+       ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
        {}
 };
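
Note: both match tables now use the INTEL_FAM6_* names instead of raw model numbers, and the out-of-band table gains Broadwell-X and Skylake-X server entries while becoming __initconst, since it is only consulted during driver initialization. For reference, a minimal sketch of how such a table is consumed at init time; the demo_* identifiers are illustrative, not part of this driver:

	static const struct x86_cpu_id demo_ids[] __initconst = {
		ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
		{}
	};

	static int __init demo_probe(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(demo_ids);

		if (!id)
			return -ENODEV;		/* running CPU not listed */
		/* id->driver_data points at the matching cpu_defaults */
		return 0;
	}
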
 
@@ -1575,12 +1594,12 @@ static struct cpufreq_driver intel_pstate_driver = {
        .name           = "intel_pstate",
 };
 
-static int __initdata no_load;
-static int __initdata no_hwp;
-static int __initdata hwp_only;
-static unsigned int force_load;
+static int no_load __initdata;
+static int no_hwp __initdata;
+static int hwp_only __initdata;
+static unsigned int force_load __initdata;
 
-static int intel_pstate_msrs_not_valid(void)
+static int __init intel_pstate_msrs_not_valid(void)
 {
        if (!pstate_funcs.get_max() ||
            !pstate_funcs.get_min() ||
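
Note: the section attributes now follow the identifiers, matching kernel convention for __initdata, force_load joins the other init-only flags, and the helpers in the hunks below gain __init, so all of this data and code can be discarded once the driver has finished initializing. Illustrative pattern only (names hypothetical):

	static int demo_flag __initdata;	/* data freed after init      */

	static int __init demo_check(void)	/* text discarded after init  */
	{
		return demo_flag;
	}
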
@@ -1590,7 +1609,7 @@ static int intel_pstate_msrs_not_valid(void)
        return 0;
 }
 
-static void copy_pid_params(struct pstate_adjust_policy *policy)
+static void __init copy_pid_params(struct pstate_adjust_policy *policy)
 {
        pid_params.sample_rate_ms = policy->sample_rate_ms;
        pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
@@ -1601,7 +1620,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
        pid_params.setpoint = policy->setpoint;
 }
 
-static void copy_cpu_funcs(struct pstate_funcs *funcs)
+static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 {
        pstate_funcs.get_max   = funcs->get_max;
        pstate_funcs.get_max_physical = funcs->get_max_physical;
@@ -1616,7 +1635,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 
 #ifdef CONFIG_ACPI
 
-static bool intel_pstate_no_acpi_pss(void)
+static bool __init intel_pstate_no_acpi_pss(void)
 {
        int i;
 
@@ -1645,7 +1664,7 @@ static bool intel_pstate_no_acpi_pss(void)
        return true;
 }
 
-static bool intel_pstate_has_acpi_ppc(void)
+static bool __init intel_pstate_has_acpi_ppc(void)
 {
        int i;
 
@@ -1673,7 +1692,7 @@ struct hw_vendor_info {
 };
 
 /* Hardware vendor-specific info that has its own power management modes */
-static struct hw_vendor_info vendor_info[] = {
+static struct hw_vendor_info vendor_info[] __initdata = {
        {1, "HP    ", "ProLiant", PSS},
        {1, "ORACLE", "X4-2    ", PPC},
        {1, "ORACLE", "X4-2L   ", PPC},
@@ -1692,7 +1711,7 @@ static struct hw_vendor_info vendor_info[] = {
        {0, "", ""},
 };
 
-static bool intel_pstate_platform_pwr_mgmt_exists(void)
+static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 {
        struct acpi_table_header hdr;
        struct hw_vendor_info *v_info;