intel_pstate: Use pr_fmt
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cb56074..1866705 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -10,6 +10,8 @@
  * of the License.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
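Defining pr_fmt before the printk wrappers are pulled in makes every pr_*()
call in this file pick up the module-name prefix automatically, which is why
the hand-written "intel_pstate: " prefixes disappear from the messages further
down. Roughly, a call such as

    pr_info("HWP enabled\n");

now expands along the lines of

    printk(KERN_INFO KBUILD_MODNAME ": " "HWP enabled\n");

i.e. it prints "intel_pstate: HWP enabled".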
@@ -64,6 +66,25 @@ static inline int ceiling_fp(int32_t x)
        return ret;
 }
 
+/**
+ * struct sample -     Store performance sample
+ * @core_pct_busy:     Ratio of APERF/MPERF in percent, which is the actual
+ *                     performance during the last sample period
+ * @busy_scaled:       Scaled busy value which is used to calculate the next
+ *                     P state. This can differ from core_pct_busy to account
+ *                     for CPU idle periods
+ * @aperf:             Difference of actual performance frequency clock count
+ *                     read from APERF MSR between last and current sample
+ * @mperf:             Difference of maximum performance frequency clock count
+ *                     read from MPERF MSR between last and current sample
+ * @tsc:               Difference of time stamp counter between last and
+ *                     current sample
+ * @freq:              Effective frequency calculated from APERF/MPERF
+ * @time:              Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing the next P state.
+ */
 struct sample {
        int32_t core_pct_busy;
        int32_t busy_scaled;
@@ -74,6 +95,20 @@ struct sample {
        u64 time;
 };
 
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate:    Current requested P state
+ * @min_pstate:                Min P state possible for this platform
+ * @max_pstate:                Max P state possible for this platform
+ * @max_pstate_physical: The physical max P state for a processor. This can
+ *                     be higher than max_pstate, which may be capped by
+ *                     platform thermal design power limits
+ * @scaling:           Scaling factor to convert frequency to cpufreq
+ *                     frequency units
+ * @turbo_pstate:      Max Turbo P state possible for this platform
+ *
+ * Stores the per cpu model P state limits and current P state.
+ */
 struct pstate_data {
        int     current_pstate;
        int     min_pstate;
@@ -83,6 +118,19 @@ struct pstate_data {
        int     turbo_pstate;
 };
 
+/**
+ * struct vid_data -   Stores voltage information data
+ * @min:               VID data for this platform corresponding to
+ *                     the lowest P state
+ * @max:               VID data corresponding to the highest P state
+ * @turbo:             VID data for the turbo P state
+ * @ratio:             Ratio of (vid max - vid min) /
+ *                     (max P state - min P state)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
+ * This data is used on Atom platforms, where in addition to the target P
+ * state, voltage data must be specified to select the next P state.
+ */
 struct vid_data {
        int min;
        int max;
@@ -90,6 +138,18 @@ struct vid_data {
        int32_t ratio;
 };
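The @ratio field above drives a linear interpolation between the min and max
VID across the P state range. A minimal sketch of how atom_get_val() (further
down) uses it, with clamping and rounding omitted:

    /* linear interpolation: VID for a given target pstate */
    vid_fp = cpudata->vid.min +
             mul_fp(int_tofp(pstate - cpudata->pstate.min_pstate),
                    cpudata->vid.ratio);
    /* the real code clamps vid_fp to [vid.min, vid.max], rounds up with
     * ceiling_fp(), and uses vid.turbo verbatim above max_pstate */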
 
+/**
+ * struct _pid -       Stores PID data
+ * @setpoint:          Target set point for busyness or performance
+ * @integral:          Storage for accumulated error values
+ * @p_gain:            PID proportional gain
+ * @i_gain:            PID integral gain
+ * @d_gain:            PID derivative gain
+ * @deadband:          PID deadband
+ * @last_err:          Last error, stored for the derivative term of the PID
+ *
+ * Stores PID coefficients and last error for PID controller.
+ */
 struct _pid {
        int setpoint;
        int32_t integral;
@@ -100,6 +160,23 @@ struct _pid {
        int32_t last_err;
 };
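For reference, pid_calc() (unchanged by this patch) combines these fields in
the textbook PID form; a rough sketch, with the integral clamping elided:

    fp_error = int_tofp(pid->setpoint) - busy;
    if (abs(fp_error) <= int_tofp(pid->deadband))
            return 0;                        /* within deadband: no change */
    pterm = mul_fp(pid->p_gain, fp_error);   /* proportional term */
    pid->integral += fp_error;               /* accumulated error */
    dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
    pid->last_err = fp_error;
    result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;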
 
+/**
+ * struct cpudata -    Per CPU instance data storage
+ * @cpu:               CPU number for this instance data
+ * @update_util:       CPUFreq utility callback information
+ * @pstate:            Stores P state limits for this CPU
+ * @vid:               Stores VID limits for this CPU
+ * @pid:               Stores PID parameters for this CPU
+ * @last_sample_time:  Last sample time
+ * @prev_aperf:                Last APERF value read from APERF MSR
+ * @prev_mperf:                Last MPERF value read from MPERF MSR
+ * @prev_tsc:          Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: Cumulative IO wait time from the last sample,
+ *                     used to compute the IO wait delta for the next one
+ * @sample:            Storage for the last sample data
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
 struct cpudata {
        int cpu;
 
@@ -118,6 +195,19 @@ struct cpudata {
 };
 
 static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_adjust_policy - Stores static PID configuration data
+ * @sample_rate_ms:    PID calculation sample rate in ms
+ * @sample_rate_ns:    Sample rate in ns, derived from @sample_rate_ms
+ * @deadband:          PID deadband
+ * @setpoint:          PID Setpoint
+ * @p_gain_pct:                PID proportional gain
+ * @i_gain_pct:                PID integral gain
+ * @d_gain_pct:                PID derivative gain
+ *
+ * Stores per CPU model static PID configuration data.
+ */
 struct pstate_adjust_policy {
        int sample_rate_ms;
        s64 sample_rate_ns;
@@ -128,17 +218,36 @@ struct pstate_adjust_policy {
        int i_gain_pct;
 };
 
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max:           Callback to get maximum non turbo effective P state
+ * @get_max_physical:  Callback to get maximum non turbo physical P state
+ * @get_min:           Callback to get minimum P state
+ * @get_turbo:         Callback to get turbo P state
+ * @get_scaling:       Callback to get frequency scaling factor
+ * @get_val:           Callback to convert P state to actual MSR write value
+ * @get_vid:           Callback to get VID data for Atom platforms
+ * @get_target_pstate: Callback to a function to calculate next P state to use
+ *
+ * Core and Atom CPU models have different ways to get P state limits. This
+ * structure is used to store those callbacks.
+ */
 struct pstate_funcs {
        int (*get_max)(void);
        int (*get_max_physical)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        int (*get_scaling)(void);
-       void (*set)(struct cpudata*, int pstate);
+       u64 (*get_val)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
        int32_t (*get_target_pstate)(struct cpudata *);
 };
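The ->set to ->get_val rename above is the heart of the MSR refactoring in
this patch: the old callback both computed the MSR value and wrote it, forcing
a cross-CPU wrmsrl_on_cpu() even from contexts already running on the target
CPU. With ->get_val() the callee only computes the value and each caller picks
the appropriate write primitive, as the later hunks show:

    /* hot path, runs on the CPU being updated */
    wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));

    /* init/exit paths, which may run on any CPU */
    wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
                  pstate_funcs.get_val(cpu, pstate));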
 
+/**
+ * struct cpu_defaults - Per CPU model default config data
+ * @pid_policy:        PID config data
+ * @funcs:             Callback function data
+ */
 struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
@@ -151,6 +260,34 @@ static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
 
+
+/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo:          User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled:    Platform turbo status, either read from the
+ *                     MSR_IA32_MISC_ENABLE MSR or inferred when the maximum
+ *                     available pstate matches the maximum turbo pstate
+ * @max_perf_pct:      Effective maximum performance limit in percentage; this
+ *                     is the minimum of the limit enforced by the cpufreq
+ *                     policy and the user limit set via intel_pstate sysfs
+ * @min_perf_pct:      Effective minimum performance limit in percentage; this
+ *                     is the maximum of the limit enforced by the cpufreq
+ *                     policy and the user limit set via intel_pstate sysfs
+ * @max_perf:          Scaled value between 0 and 255 corresponding to
+ *                     max_perf_pct, used to limit the max pstate
+ * @min_perf:          Scaled value between 0 and 255 corresponding to
+ *                     min_perf_pct, used to limit the min pstate
+ * @max_policy_pct:    The maximum performance in percentage enforced by
+ *                     cpufreq setpolicy interface
+ * @max_sysfs_pct:     The maximum performance in percentage enforced by
+ *                     intel pstate sysfs interface
+ * @min_policy_pct:    The minimum performance in percentage enforced by
+ *                     cpufreq setpolicy interface
+ * @min_sysfs_pct:     The minimum performance in percentage enforced by
+ *                     intel pstate sysfs interface
+ *
+ * Storage for user and policy defined limits.
+ */
 struct perf_limits {
        int no_turbo;
        int turbo_disabled;
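The 0..255 scale mentioned above is the driver's 8-bit fixed-point format
(FRAC_BITS == 8), so a percentage maps onto it as a fraction of
int_tofp(1) == 256; for example:

    max_perf = div_fp(75, 100);      /* 75% -> (75 << 8) / 100 == 192 */
    /* i.e. 0.75 in 8-bit fixed point, used to scale the max pstate */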
@@ -206,17 +343,17 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
 {
-       pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->p_gain = div_fp(percent, 100);
 }
 
 static inline void pid_i_gain_set(struct _pid *pid, int percent)
 {
-       pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->i_gain = div_fp(percent, 100);
 }
 
 static inline void pid_d_gain_set(struct _pid *pid, int percent)
 {
-       pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->d_gain = div_fp(percent, 100);
 }
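These hunks (and several below) rely on a div_fp() helper that now takes plain
integers and performs the fixed-point conversion internally, presumably
something along the lines of:

    static inline int32_t div_fp(s64 x, s64 y)
    {
            return div64_s64(int_tofp(x), y);       /* fixed-point x / y */
    }

so div_fp(percent, 100) yields the same value as the old
div_fp(int_tofp(percent), int_tofp(100)):
((p << 8) << 8) / (100 << 8) == (p << 8) / 100.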
 
 static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -394,7 +531,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
-       turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+       turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
        return sprintf(buf, "%u\n", turbo_pct);
 }
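A worked example of the computation above, with hypothetical limits
min_pstate = 1, max_pstate = 24 and turbo_pstate = 32:

    total     = 32 - 1 + 1;          /* 32 P states in total      */
    no_turbo  = 24 - 1 + 1;          /* 24 non-turbo P states     */
    turbo_fp  = div_fp(24, 32);      /* 0.75 in fixed point (192) */
    turbo_pct = 100 - 75;            /* 25% of the range is turbo */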
@@ -436,7 +573,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
        update_turbo_state();
        if (limits->turbo_disabled) {
-               pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }
 
@@ -465,8 +602,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->max_perf_pct);
        limits->max_perf_pct = max(limits->min_perf_pct,
                                   limits->max_perf_pct);
-       limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-                                 int_tofp(100));
+       limits->max_perf = div_fp(limits->max_perf_pct, 100);
 
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@ -490,8 +626,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->min_perf_pct);
        limits->min_perf_pct = min(limits->max_perf_pct,
                                   limits->min_perf_pct);
-       limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-                                 int_tofp(100));
+       limits->min_perf = div_fp(limits->min_perf_pct, 100);
 
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@ -565,7 +700,7 @@ static int atom_get_turbo_pstate(void)
        return value & 0x7F;
 }
 
-static void atom_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 {
        u64 val;
        int32_t vid_fp;
@@ -585,9 +720,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate)
        if (pstate > cpudata->pstate.max_pstate)
                vid = cpudata->vid.turbo;
 
-       val |= vid;
-
-       wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+       return val | vid;
 }
 
 static int silvermont_get_scaling(void)
@@ -711,7 +844,7 @@ static inline int core_get_scaling(void)
        return 100000;
 }
 
-static void core_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 core_get_val(struct cpudata *cpudata, int pstate)
 {
        u64 val;
 
@@ -719,7 +852,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
        if (limits->no_turbo && !limits->turbo_disabled)
                val |= (u64)1 << 32;
 
-       wrmsrl(MSR_IA32_PERF_CTL, val);
+       return val;
 }
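Both get_val() variants build the IA32_PERF_CTL value the same way: the
requested performance ratio (the P state) goes into bits 15:8, hence the
pstate << 8, and bit 32 is the turbo (IDA) disengage bit, set when the user
asked for no_turbo on a platform where turbo is otherwise available:

    val = (u64)pstate << 8;          /* target ratio in bits 15:8 */
    if (limits->no_turbo && !limits->turbo_disabled)
            val |= (u64)1 << 32;     /* disengage turbo */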
 
 static int knl_get_turbo_pstate(void)
@@ -750,7 +883,7 @@ static struct cpu_defaults core_params = {
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
                .get_scaling = core_get_scaling,
-               .set = core_set_pstate,
+               .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_performance,
        },
 };
@@ -769,7 +902,7 @@ static struct cpu_defaults silvermont_params = {
                .get_max_physical = atom_get_max_pstate,
                .get_min = atom_get_min_pstate,
                .get_turbo = atom_get_turbo_pstate,
-               .set = atom_set_pstate,
+               .get_val = atom_get_val,
                .get_scaling = silvermont_get_scaling,
                .get_vid = atom_get_vid,
                .get_target_pstate = get_target_pstate_use_cpu_load,
@@ -790,7 +923,7 @@ static struct cpu_defaults airmont_params = {
                .get_max_physical = atom_get_max_pstate,
                .get_min = atom_get_min_pstate,
                .get_turbo = atom_get_turbo_pstate,
-               .set = atom_set_pstate,
+               .get_val = atom_get_val,
                .get_scaling = airmont_get_scaling,
                .get_vid = atom_get_vid,
                .get_target_pstate = get_target_pstate_use_cpu_load,
@@ -812,7 +945,7 @@ static struct cpu_defaults knl_params = {
                .get_min = core_get_min_pstate,
                .get_turbo = knl_get_turbo_pstate,
                .get_scaling = core_get_scaling,
-               .set = core_set_pstate,
+               .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_performance,
        },
 };
@@ -839,25 +972,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
+static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
 {
-       int max_perf, min_perf;
-
-       if (force) {
-               update_turbo_state();
-
-               intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
-
-               pstate = clamp_t(int, pstate, min_perf, max_perf);
-
-               if (pstate == cpu->pstate.current_pstate)
-                       return;
-       }
        trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
-
        cpu->pstate.current_pstate = pstate;
+}
+
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+       int pstate = cpu->pstate.min_pstate;
 
-       pstate_funcs.set(cpu, pstate);
+       intel_pstate_record_pstate(cpu, pstate);
+       /*
+        * Generally, there is no guarantee that this code will always run on
+        * the CPU being updated, so force the register update to run on the
+        * right CPU.
+        */
+       wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+                     pstate_funcs.get_val(cpu, pstate));
 }
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -870,7 +1002,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);
-       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+
+       intel_pstate_set_min_pstate(cpu);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -878,8 +1011,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
        struct sample *sample = &cpu->sample;
        int64_t core_pct;
 
-       core_pct = int_tofp(sample->aperf) * int_tofp(100);
-       core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
+       core_pct = sample->aperf * int_tofp(100);
+       core_pct = div64_u64(core_pct, sample->mperf);
 
        sample->core_pct_busy = (int32_t)core_pct;
 }
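Dropping the int_tofp() conversions here is safe because the scale factors
cancel in the ratio; with FRAC_BITS == 8:

    /* old: ((aperf << 8) * (100 << 8)) / (mperf << 8)
     * new:   (aperf * (100 << 8)) / mperf
     * both equal (aperf / mperf) * 100 in 8-bit fixed point, but the
     * new form shifts less and leaves more headroom before overflow */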
@@ -912,7 +1045,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
        cpu->prev_tsc = tsc;
-       return true;
+       /*
+        * First time this function is invoked in a given cycle, all of the
+        * previous sample data fields are equal to zero or stale and they must
+        * be populated with meaningful numbers for things to work, so assume
+        * that sample.time will always be reset before setting the utilization
+        * update hook and make the caller skip the sample then.
+        */
+       return !!cpu->last_sample_time;
 }
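This return value ties in with intel_pstate_set_update_util_hook() later in
the patch, which zeroes sample.time before registering the hook; the first
callback then merely primes the prev_* fields. Roughly:

    /* hook registration:  cpu->sample.time = 0                          */
    /* 1st callback: last_sample_time = sample.time (== 0) -> returns    */
    /*    false; prev_aperf/prev_mperf/prev_tsc primed, sample skipped   */
    /* later callbacks: last_sample_time != 0 -> returns true            */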
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
@@ -975,8 +1115,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
         * specified pstate.
         */
        core_busy = cpu->sample.core_pct_busy;
-       max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
-       current_pstate = int_tofp(cpu->pstate.current_pstate);
+       max_pstate = cpu->pstate.max_pstate_physical;
+       current_pstate = cpu->pstate.current_pstate;
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
        /*
@@ -986,10 +1126,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
         * enough period of time to adjust our busyness.
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
-       if ((s64)duration_ns > pid_params.sample_rate_ns * 3
-           && cpu->last_sample_time > 0) {
-               sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
-                                     int_tofp(duration_ns));
+       if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
+               sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
                core_busy = mul_fp(core_busy, sample_ratio);
        }
 
@@ -997,6 +1135,21 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
        return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
 }
 
+static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+       int max_perf, min_perf;
+
+       update_turbo_state();
+
+       intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+       pstate = clamp_t(int, pstate, min_perf, max_perf);
+       if (pstate == cpu->pstate.current_pstate)
+               return;
+
+       intel_pstate_record_pstate(cpu, pstate);
+       wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+}
+
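Note the asymmetry with intel_pstate_set_min_pstate() earlier: this function
is only invoked from the scheduler's utilization-update callback, which runs
on the CPU being updated, so a plain local wrmsrl() is sufficient and the
cross-CPU wrmsrl_on_cpu() is not needed here.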
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
        int from, target_pstate;
@@ -1006,7 +1159,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
        target_pstate = pstate_funcs.get_target_pstate(cpu);
 
-       intel_pstate_set_pstate(cpu, target_pstate, true);
+       intel_pstate_update_pstate(cpu, target_pstate);
 
        sample = &cpu->sample;
        trace_pstate_sample(fp_toint(sample->core_pct_busy),
@@ -1087,12 +1240,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        intel_pstate_get_cpu_pstates(cpu);
 
        intel_pstate_busy_pid_reset(cpu);
-       intel_pstate_sample(cpu, 0);
 
-       cpu->update_util.func = intel_pstate_update_util;
-       cpufreq_set_update_util_data(cpunum, &cpu->update_util);
-
-       pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+       pr_debug("controlling: cpu %d\n", cpunum);
 
        return 0;
 }
@@ -1109,22 +1258,55 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
        return get_avg_frequency(cpu);
 }
 
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+       struct cpudata *cpu = all_cpu_data[cpu_num];
+
+       /* Prevent intel_pstate_update_util() from using stale data. */
+       cpu->sample.time = 0;
+       cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+                                    intel_pstate_update_util);
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+       cpufreq_remove_update_util_hook(cpu);
+       synchronize_sched();
+}
+
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+       limits->no_turbo = 0;
+       limits->turbo_disabled = 0;
+       limits->max_perf_pct = 100;
+       limits->max_perf = int_tofp(1);
+       limits->min_perf_pct = 100;
+       limits->min_perf = int_tofp(1);
+       limits->max_policy_pct = 100;
+       limits->max_sysfs_pct = 100;
+       limits->min_policy_pct = 0;
+       limits->min_sysfs_pct = 0;
+}
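In the driver's fixed-point scale, int_tofp(1) == 256 stands for 100%,
matching what the sysfs path computes:

    div_fp(100, 100) == int_tofp(1) == 256   /* 100% as a fixed-point fraction */

so this helper pins both bounds to full performance.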
+
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
-           policy->max >= policy->cpuinfo.max_freq) {
-               pr_debug("intel_pstate: set performance\n");
+       intel_pstate_clear_update_util_hook(policy->cpu);
+
+       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
-               if (hwp_active)
-                       intel_pstate_hwp_set(policy->cpus);
-               return 0;
+               if (policy->max >= policy->cpuinfo.max_freq) {
+                       pr_debug("set performance\n");
+                       intel_pstate_set_performance_limits(limits);
+                       goto out;
+               }
+       } else {
+               pr_debug("set powersave\n");
+               limits = &powersave_limits;
        }
 
-       pr_debug("intel_pstate: set powersave\n");
-       limits = &powersave_limits;
        limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
        limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1145,10 +1327,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
-       limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-                                 int_tofp(100));
-       limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-                                 int_tofp(100));
+       limits->min_perf = div_fp(limits->min_perf_pct, 100);
+       limits->max_perf = div_fp(limits->max_perf_pct, 100);
+
+ out:
+       intel_pstate_set_update_util_hook(policy->cpu);
 
        if (hwp_active)
                intel_pstate_hwp_set(policy->cpus);
@@ -1172,15 +1355,14 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+       pr_debug("CPU %d exiting\n", cpu_num);
 
-       cpufreq_set_update_util_data(cpu_num, NULL);
-       synchronize_sched();
+       intel_pstate_clear_update_util_hook(cpu_num);
 
        if (hwp_active)
                return;
 
-       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+       intel_pstate_set_min_pstate(cpu);
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1255,7 +1437,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
        pstate_funcs.get_min   = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
        pstate_funcs.get_scaling = funcs->get_scaling;
-       pstate_funcs.set       = funcs->set;
+       pstate_funcs.get_val   = funcs->get_val;
        pstate_funcs.get_vid   = funcs->get_vid;
        pstate_funcs.get_target_pstate = funcs->get_target_pstate;
 
@@ -1418,7 +1600,7 @@ hwp_cpu_matched:
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;
 
-       pr_info("Intel P-state driver initializing.\n");
+       pr_info("Intel P-state driver initializing\n");
 
        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
@@ -1435,15 +1617,14 @@ hwp_cpu_matched:
        intel_pstate_sysfs_expose_params();
 
        if (hwp_active)
-               pr_info("intel_pstate: HWP enabled\n");
+               pr_info("HWP enabled\n");
 
        return rc;
 out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
-                       cpufreq_set_update_util_data(cpu, NULL);
-                       synchronize_sched();
+                       intel_pstate_clear_update_util_hook(cpu);
                        kfree(all_cpu_data[cpu]);
                }
        }
@@ -1462,7 +1643,7 @@ static int __init intel_pstate_setup(char *str)
        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
-               pr_info("intel_pstate: HWP disabled\n");
+               pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))