#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#define ATOM_RATIOS 0x66a
#define ATOM_VIDS 0x66b
* read from MPERF MSR between last and current sample
* @tsc: Difference of time stamp counter between last and
* current sample
- -- * @freq: Effective frequency calculated from APERF/MPERF
* @time: Current time from scheduler
*
* This structure is used in the cpudata structure to store performance sample
u64 aperf;
u64 mperf;
u64 tsc;
- -- int freq;
u64 time;
};
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
- --static struct pstate_adjust_policy pid_params;
- --static struct pstate_funcs pstate_funcs;
- --static int hwp_active;
+ ++static struct pstate_adjust_policy pid_params __read_mostly;
+ ++static struct pstate_funcs pstate_funcs __read_mostly;
+ ++static int hwp_active __read_mostly;
#ifdef CONFIG_ACPI
static bool acpi_ppc;
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
- -- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ ++ if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ ++ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
if (err)
goto skip_tar;
- -- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+ ++ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
if (err)
goto skip_tar;
u64 value;
int nont, ret;
---- rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
++++ rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
ret = (value) & 255;
if (ret <= nont)
u64 value;
int nont, ret;
---- rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
++++ rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
},
};
+ ++static struct cpu_defaults bxt_params = {
+ ++ .pid_policy = {
+ ++ .sample_rate_ms = 10,
+ ++ .deadband = 0,
+ ++ .setpoint = 60,
+ ++ .p_gain_pct = 14,
+ ++ .d_gain_pct = 0,
+ ++ .i_gain_pct = 4,
+ ++ },
+ ++ .funcs = {
+ ++ .get_max = core_get_max_pstate,
+ ++ .get_max_physical = core_get_max_pstate_physical,
+ ++ .get_min = core_get_min_pstate,
+ ++ .get_turbo = core_get_turbo_pstate,
+ ++ .get_scaling = core_get_scaling,
+ ++ .get_val = core_get_val,
+ ++ .get_target_pstate = get_target_pstate_use_cpu_load,
+ ++ },
+ ++};
+ ++
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
- --static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
- --{
- -- trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
- -- cpu->pstate.current_pstate = pstate;
- --}
- --
static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
int pstate = cpu->pstate.min_pstate;
- -- intel_pstate_record_pstate(cpu, pstate);
+ ++ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ ++ cpu->pstate.current_pstate = pstate;
/*
* Generally, there is no guarantee that this code will always run on
* the CPU being updated, so force the register update to run on the
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
+ ++ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
if (pstate == cpu->pstate.current_pstate)
return;
- -- intel_pstate_record_pstate(cpu, pstate);
+ ++ cpu->pstate.current_pstate = pstate;
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
(unsigned long)&policy }
/*
 * CPUs handled by this driver, keyed on family-6 model. Symbolic
 * INTEL_FAM6_* names (asm/intel-family.h) replace raw model numbers so each
 * entry documents itself.
 */
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
- --static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
- -- ICPU(0x56, core_params),
+ ++static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+ ++ ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
++ ++ ICPU(INTEL_FAM6_BROADWELL_X, core_params),
++ ++ ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
{}
};
{
struct cpudata *cpu = all_cpu_data[cpu_num];
+ + if (cpu->update_util_set)
+ + return;
+ +
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- - intel_pstate_clear_update_util_hook(policy->cpu);
- -
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
.name = "intel_pstate",
};
- --static int __initdata no_load;
- --static int __initdata no_hwp;
- --static int __initdata hwp_only;
- --static unsigned int force_load;
+ ++static int no_load __initdata;
+ ++static int no_hwp __initdata;
+ ++static int hwp_only __initdata;
+ ++static unsigned int force_load __initdata;
- --static int intel_pstate_msrs_not_valid(void)
+ ++static int __init intel_pstate_msrs_not_valid(void)
{
if (!pstate_funcs.get_max() ||
!pstate_funcs.get_min() ||
return 0;
}
- --static void copy_pid_params(struct pstate_adjust_policy *policy)
+ ++static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
pid_params.setpoint = policy->setpoint;
}
- --static void copy_cpu_funcs(struct pstate_funcs *funcs)
+ ++static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
pstate_funcs.get_max_physical = funcs->get_max_physical;
#ifdef CONFIG_ACPI
- --static bool intel_pstate_no_acpi_pss(void)
+ ++static bool __init intel_pstate_no_acpi_pss(void)
{
int i;
return true;
}
- --static bool intel_pstate_has_acpi_ppc(void)
+ ++static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
};
/* Hardware vendor-specific info that has its own power management modes */
- --static struct hw_vendor_info vendor_info[] = {
+ ++static struct hw_vendor_info vendor_info[] __initdata = {
{1, "HP ", "ProLiant", PSS},
{1, "ORACLE", "X4-2 ", PPC},
{1, "ORACLE", "X4-2L ", PPC},
{0, "", ""},
};
- --static bool intel_pstate_platform_pwr_mgmt_exists(void)
+ ++static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;