2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
34 /* Macros to iterate over lists */
35 /* Iterate over online CPUs policies */
36 static LIST_HEAD(cpufreq_policy_list);
37 #define for_each_policy(__policy) \
38 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
41 * The "cpufreq driver" - the arch- or hardware-dependent low
42 * level driver of CPUFreq support, and its spinlock. This lock
43 * also protects the cpufreq_cpu_data array.
45 static struct cpufreq_driver *cpufreq_driver;
46 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
47 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
48 static DEFINE_RWLOCK(cpufreq_driver_lock);
49 DEFINE_MUTEX(cpufreq_governor_lock);
51 /* This one keeps track of the previously set governor of a removed CPU */
52 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
54 /* Flag to suspend/resume CPUFreq governors */
55 static bool cpufreq_suspended;
57 static inline bool has_target(void)
59 return cpufreq_driver->target_index || cpufreq_driver->target;
63 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
66 static DECLARE_RWSEM(cpufreq_rwsem);
68 /* internal prototypes */
69 static int __cpufreq_governor(struct cpufreq_policy *policy,
71 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
72 static void handle_update(struct work_struct *work);
75 * Two notifier lists: the "policy" list is involved in the
76 * validation process for a new CPU frequency policy; the
77 * "transition" list for kernel code that needs to handle
78 * changes to devices when the CPU clock speed changes.
79 * The mutex locks both lists.
81 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
82 static struct srcu_notifier_head cpufreq_transition_notifier_list;
84 static bool init_cpufreq_transition_notifier_list_called;
85 static int __init init_cpufreq_transition_notifier_list(void)
87 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
88 init_cpufreq_transition_notifier_list_called = true;
91 pure_initcall(init_cpufreq_transition_notifier_list);
93 static int off __read_mostly;
94 static int cpufreq_disabled(void)
98 void disable_cpufreq(void)
102 static LIST_HEAD(cpufreq_governor_list);
103 static DEFINE_MUTEX(cpufreq_governor_mutex);
105 bool have_governor_per_policy(void)
107 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
109 EXPORT_SYMBOL_GPL(have_governor_per_policy);
111 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
113 if (have_governor_per_policy())
114 return &policy->kobj;
116 return cpufreq_global_kobject;
118 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
120 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
126 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
128 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
129 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
130 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
131 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
132 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
133 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
135 idle_time = cur_wall_time - busy_time;
137 *wall = cputime_to_usecs(cur_wall_time);
139 return cputime_to_usecs(idle_time);
142 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
144 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
146 if (idle_time == -1ULL)
147 return get_cpu_idle_time_jiffy(cpu, wall);
149 idle_time += get_cpu_iowait_time_us(cpu, wall);
153 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
156 * This is a generic cpufreq init() routine which can be used by cpufreq
157 * drivers of SMP systems. It will do following:
158 * - validate & show freq table passed
159 * - set policies transition latency
160 * - policy->cpus with all possible CPUs
162 int cpufreq_generic_init(struct cpufreq_policy *policy,
163 struct cpufreq_frequency_table *table,
164 unsigned int transition_latency)
168 ret = cpufreq_table_validate_and_show(policy, table);
170 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
174 policy->cpuinfo.transition_latency = transition_latency;
177 * The driver only supports the SMP configuartion where all processors
178 * share the clock and voltage and clock.
180 cpumask_setall(policy->cpus);
184 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
186 unsigned int cpufreq_generic_get(unsigned int cpu)
188 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
190 if (!policy || IS_ERR(policy->clk)) {
191 pr_err("%s: No %s associated to cpu: %d\n",
192 __func__, policy ? "clk" : "policy", cpu);
196 return clk_get_rate(policy->clk) / 1000;
198 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
200 /* Only for cpufreq core internal use */
201 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
203 return per_cpu(cpufreq_cpu_data, cpu);
206 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
208 struct cpufreq_policy *policy = NULL;
211 if (cpu >= nr_cpu_ids)
214 if (!down_read_trylock(&cpufreq_rwsem))
217 /* get the cpufreq driver */
218 read_lock_irqsave(&cpufreq_driver_lock, flags);
220 if (cpufreq_driver) {
222 policy = per_cpu(cpufreq_cpu_data, cpu);
224 kobject_get(&policy->kobj);
227 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
230 up_read(&cpufreq_rwsem);
234 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
236 void cpufreq_cpu_put(struct cpufreq_policy *policy)
238 kobject_put(&policy->kobj);
239 up_read(&cpufreq_rwsem);
241 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
243 /*********************************************************************
244 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
245 *********************************************************************/
248 * adjust_jiffies - adjust the system "loops_per_jiffy"
250 * This function alters the system "loops_per_jiffy" for the clock
251 * speed change. Note that loops_per_jiffy cannot be updated on SMP
252 * systems as each CPU might be scaled differently. So, use the arch
253 * per-CPU loops_per_jiffy value wherever possible.
255 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
258 static unsigned long l_p_j_ref;
259 static unsigned int l_p_j_ref_freq;
261 if (ci->flags & CPUFREQ_CONST_LOOPS)
264 if (!l_p_j_ref_freq) {
265 l_p_j_ref = loops_per_jiffy;
266 l_p_j_ref_freq = ci->old;
267 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
268 l_p_j_ref, l_p_j_ref_freq);
270 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
271 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
273 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
274 loops_per_jiffy, ci->new);
279 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
280 struct cpufreq_freqs *freqs, unsigned int state)
282 BUG_ON(irqs_disabled());
284 if (cpufreq_disabled())
287 freqs->flags = cpufreq_driver->flags;
288 pr_debug("notification %u of frequency transition to %u kHz\n",
293 case CPUFREQ_PRECHANGE:
294 /* detect if the driver reported a value as "old frequency"
295 * which is not equal to what the cpufreq core thinks is
298 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
299 if ((policy) && (policy->cpu == freqs->cpu) &&
300 (policy->cur) && (policy->cur != freqs->old)) {
301 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
302 freqs->old, policy->cur);
303 freqs->old = policy->cur;
306 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
307 CPUFREQ_PRECHANGE, freqs);
308 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
311 case CPUFREQ_POSTCHANGE:
312 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
313 pr_debug("FREQ: %lu - CPU: %lu\n",
314 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
315 trace_cpu_frequency(freqs->new, freqs->cpu);
316 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
317 CPUFREQ_POSTCHANGE, freqs);
318 if (likely(policy) && likely(policy->cpu == freqs->cpu))
319 policy->cur = freqs->new;
325 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
326 * on frequency transition.
328 * This function calls the transition notifiers and the "adjust_jiffies"
329 * function. It is called twice on all CPU frequency changes that have
332 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
333 struct cpufreq_freqs *freqs, unsigned int state)
335 for_each_cpu(freqs->cpu, policy->cpus)
336 __cpufreq_notify_transition(policy, freqs, state);
339 /* Do post notifications when there are chances that transition has failed */
340 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
341 struct cpufreq_freqs *freqs, int transition_failed)
343 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
344 if (!transition_failed)
347 swap(freqs->old, freqs->new);
348 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
349 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
352 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
353 struct cpufreq_freqs *freqs)
357 * Catch double invocations of _begin() which lead to self-deadlock.
358 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
359 * doesn't invoke _begin() on their behalf, and hence the chances of
360 * double invocations are very low. Moreover, there are scenarios
361 * where these checks can emit false-positive warnings in these
362 * drivers; so we avoid that by skipping them altogether.
364 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
365 && current == policy->transition_task);
368 wait_event(policy->transition_wait, !policy->transition_ongoing);
370 spin_lock(&policy->transition_lock);
372 if (unlikely(policy->transition_ongoing)) {
373 spin_unlock(&policy->transition_lock);
377 policy->transition_ongoing = true;
378 policy->transition_task = current;
380 spin_unlock(&policy->transition_lock);
382 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
384 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
386 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
387 struct cpufreq_freqs *freqs, int transition_failed)
389 if (unlikely(WARN_ON(!policy->transition_ongoing)))
392 cpufreq_notify_post_transition(policy, freqs, transition_failed);
394 policy->transition_ongoing = false;
395 policy->transition_task = NULL;
397 wake_up(&policy->transition_wait);
399 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
402 /*********************************************************************
404 *********************************************************************/
405 static ssize_t show_boost(struct kobject *kobj,
406 struct attribute *attr, char *buf)
408 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
411 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
412 const char *buf, size_t count)
416 ret = sscanf(buf, "%d", &enable);
417 if (ret != 1 || enable < 0 || enable > 1)
420 if (cpufreq_boost_trigger_state(enable)) {
421 pr_err("%s: Cannot %s BOOST!\n",
422 __func__, enable ? "enable" : "disable");
426 pr_debug("%s: cpufreq BOOST %s\n",
427 __func__, enable ? "enabled" : "disabled");
431 define_one_global_rw(boost);
433 static struct cpufreq_governor *find_governor(const char *str_governor)
435 struct cpufreq_governor *t;
437 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
438 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
445 * cpufreq_parse_governor - parse a governor string
447 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
448 struct cpufreq_governor **governor)
455 if (cpufreq_driver->setpolicy) {
456 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
457 *policy = CPUFREQ_POLICY_PERFORMANCE;
459 } else if (!strncasecmp(str_governor, "powersave",
461 *policy = CPUFREQ_POLICY_POWERSAVE;
465 struct cpufreq_governor *t;
467 mutex_lock(&cpufreq_governor_mutex);
469 t = find_governor(str_governor);
474 mutex_unlock(&cpufreq_governor_mutex);
475 ret = request_module("cpufreq_%s", str_governor);
476 mutex_lock(&cpufreq_governor_mutex);
479 t = find_governor(str_governor);
487 mutex_unlock(&cpufreq_governor_mutex);
494 * cpufreq_per_cpu_attr_read() / show_##file_name() -
495 * print out cpufreq information
497 * Write out information from cpufreq_driver->policy[cpu]; object must be
/* Generate a sysfs show handler printing one unsigned policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
508 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
509 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
510 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
511 show_one(scaling_min_freq, min);
512 show_one(scaling_max_freq, max);
514 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
518 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
519 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
521 ret = sprintf(buf, "%u\n", policy->cur);
525 static int cpufreq_set_policy(struct cpufreq_policy *policy,
526 struct cpufreq_policy *new_policy);
529 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
/*
 * Generate a sysfs store handler for one unsigned policy field: parse the
 * value into a copy of the current policy, apply it via
 * cpufreq_set_policy() and remember it as the user's preference on success.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}
554 store_one(scaling_min_freq, min);
555 store_one(scaling_max_freq, max);
558 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
560 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
563 unsigned int cur_freq = __cpufreq_get(policy);
565 return sprintf(buf, "<unknown>");
566 return sprintf(buf, "%u\n", cur_freq);
570 * show_scaling_governor - show the current policy for the specified CPU
572 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
574 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
575 return sprintf(buf, "powersave\n");
576 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
577 return sprintf(buf, "performance\n");
578 else if (policy->governor)
579 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
580 policy->governor->name);
585 * store_scaling_governor - store policy for the specified CPU
587 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
588 const char *buf, size_t count)
591 char str_governor[16];
592 struct cpufreq_policy new_policy;
594 ret = cpufreq_get_policy(&new_policy, policy->cpu);
598 ret = sscanf(buf, "%15s", str_governor);
602 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
603 &new_policy.governor))
606 ret = cpufreq_set_policy(policy, &new_policy);
608 policy->user_policy.policy = policy->policy;
609 policy->user_policy.governor = policy->governor;
618 * show_scaling_driver - show the cpufreq driver currently loaded
620 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
622 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
626 * show_scaling_available_governors - show the available CPUfreq governors
628 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
632 struct cpufreq_governor *t;
635 i += sprintf(buf, "performance powersave");
639 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
640 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
641 - (CPUFREQ_NAME_LEN + 2)))
643 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
646 i += sprintf(&buf[i], "\n");
650 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
655 for_each_cpu(cpu, mask) {
657 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
658 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
659 if (i >= (PAGE_SIZE - 5))
662 i += sprintf(&buf[i], "\n");
665 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
668 * show_related_cpus - show the CPUs affected by each transition even if
669 * hw coordination is in use
671 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
673 return cpufreq_show_cpus(policy->related_cpus, buf);
677 * show_affected_cpus - show the CPUs affected by each transition
679 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
681 return cpufreq_show_cpus(policy->cpus, buf);
684 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
685 const char *buf, size_t count)
687 unsigned int freq = 0;
690 if (!policy->governor || !policy->governor->store_setspeed)
693 ret = sscanf(buf, "%u", &freq);
697 policy->governor->store_setspeed(policy, freq);
702 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
704 if (!policy->governor || !policy->governor->show_setspeed)
705 return sprintf(buf, "<unsupported>\n");
707 return policy->governor->show_setspeed(policy, buf);
711 * show_bios_limit - show the current cpufreq HW/BIOS limitation
713 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
717 if (cpufreq_driver->bios_limit) {
718 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
720 return sprintf(buf, "%u\n", limit);
722 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
725 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
726 cpufreq_freq_attr_ro(cpuinfo_min_freq);
727 cpufreq_freq_attr_ro(cpuinfo_max_freq);
728 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
729 cpufreq_freq_attr_ro(scaling_available_governors);
730 cpufreq_freq_attr_ro(scaling_driver);
731 cpufreq_freq_attr_ro(scaling_cur_freq);
732 cpufreq_freq_attr_ro(bios_limit);
733 cpufreq_freq_attr_ro(related_cpus);
734 cpufreq_freq_attr_ro(affected_cpus);
735 cpufreq_freq_attr_rw(scaling_min_freq);
736 cpufreq_freq_attr_rw(scaling_max_freq);
737 cpufreq_freq_attr_rw(scaling_governor);
738 cpufreq_freq_attr_rw(scaling_setspeed);
740 static struct attribute *default_attrs[] = {
741 &cpuinfo_min_freq.attr,
742 &cpuinfo_max_freq.attr,
743 &cpuinfo_transition_latency.attr,
744 &scaling_min_freq.attr,
745 &scaling_max_freq.attr,
748 &scaling_governor.attr,
749 &scaling_driver.attr,
750 &scaling_available_governors.attr,
751 &scaling_setspeed.attr,
755 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
756 #define to_attr(a) container_of(a, struct freq_attr, attr)
758 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
760 struct cpufreq_policy *policy = to_policy(kobj);
761 struct freq_attr *fattr = to_attr(attr);
764 if (!down_read_trylock(&cpufreq_rwsem))
767 down_read(&policy->rwsem);
770 ret = fattr->show(policy, buf);
774 up_read(&policy->rwsem);
775 up_read(&cpufreq_rwsem);
780 static ssize_t store(struct kobject *kobj, struct attribute *attr,
781 const char *buf, size_t count)
783 struct cpufreq_policy *policy = to_policy(kobj);
784 struct freq_attr *fattr = to_attr(attr);
785 ssize_t ret = -EINVAL;
789 if (!cpu_online(policy->cpu))
792 if (!down_read_trylock(&cpufreq_rwsem))
795 down_write(&policy->rwsem);
798 ret = fattr->store(policy, buf, count);
802 up_write(&policy->rwsem);
804 up_read(&cpufreq_rwsem);
811 static void cpufreq_sysfs_release(struct kobject *kobj)
813 struct cpufreq_policy *policy = to_policy(kobj);
814 pr_debug("last reference is dropped\n");
815 complete(&policy->kobj_unregister);
818 static const struct sysfs_ops sysfs_ops = {
823 static struct kobj_type ktype_cpufreq = {
824 .sysfs_ops = &sysfs_ops,
825 .default_attrs = default_attrs,
826 .release = cpufreq_sysfs_release,
829 struct kobject *cpufreq_global_kobject;
830 EXPORT_SYMBOL(cpufreq_global_kobject);
832 static int cpufreq_global_kobject_usage;
834 int cpufreq_get_global_kobject(void)
836 if (!cpufreq_global_kobject_usage++)
837 return kobject_add(cpufreq_global_kobject,
838 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
842 EXPORT_SYMBOL(cpufreq_get_global_kobject);
844 void cpufreq_put_global_kobject(void)
846 if (!--cpufreq_global_kobject_usage)
847 kobject_del(cpufreq_global_kobject);
849 EXPORT_SYMBOL(cpufreq_put_global_kobject);
851 int cpufreq_sysfs_create_file(const struct attribute *attr)
853 int ret = cpufreq_get_global_kobject();
856 ret = sysfs_create_file(cpufreq_global_kobject, attr);
858 cpufreq_put_global_kobject();
863 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
865 void cpufreq_sysfs_remove_file(const struct attribute *attr)
867 sysfs_remove_file(cpufreq_global_kobject, attr);
868 cpufreq_put_global_kobject();
870 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
872 /* symlink affected CPUs */
873 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
878 for_each_cpu(j, policy->cpus) {
879 struct device *cpu_dev;
881 if (j == policy->cpu)
884 pr_debug("Adding link for CPU: %u\n", j);
885 cpu_dev = get_cpu_device(j);
886 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
894 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
897 struct freq_attr **drv_attr;
900 /* set up files for this cpu device */
901 drv_attr = cpufreq_driver->attr;
902 while (drv_attr && *drv_attr) {
903 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
908 if (cpufreq_driver->get) {
909 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
914 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
918 if (cpufreq_driver->bios_limit) {
919 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
924 return cpufreq_add_dev_symlink(policy);
927 static void cpufreq_init_policy(struct cpufreq_policy *policy)
929 struct cpufreq_governor *gov = NULL;
930 struct cpufreq_policy new_policy;
933 memcpy(&new_policy, policy, sizeof(*policy));
935 /* Update governor of new_policy to the governor used before hotplug */
936 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
938 pr_debug("Restoring governor %s for cpu %d\n",
939 policy->governor->name, policy->cpu);
941 gov = CPUFREQ_DEFAULT_GOVERNOR;
943 new_policy.governor = gov;
945 /* Use the default policy if its valid. */
946 if (cpufreq_driver->setpolicy)
947 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
949 /* set default policy */
950 ret = cpufreq_set_policy(policy, &new_policy);
952 pr_debug("setting policy failed\n");
953 if (cpufreq_driver->exit)
954 cpufreq_driver->exit(policy);
958 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
959 unsigned int cpu, struct device *dev)
965 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
967 pr_err("%s: Failed to stop governor\n", __func__);
972 down_write(&policy->rwsem);
974 write_lock_irqsave(&cpufreq_driver_lock, flags);
976 cpumask_set_cpu(cpu, policy->cpus);
977 per_cpu(cpufreq_cpu_data, cpu) = policy;
978 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
980 up_write(&policy->rwsem);
983 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
985 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
988 pr_err("%s: Failed to start governor\n", __func__);
993 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
996 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
998 struct cpufreq_policy *policy;
1001 read_lock_irqsave(&cpufreq_driver_lock, flags);
1003 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1005 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1008 policy->governor = NULL;
1013 static struct cpufreq_policy *cpufreq_policy_alloc(void)
1015 struct cpufreq_policy *policy;
1017 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1021 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1022 goto err_free_policy;
1024 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1025 goto err_free_cpumask;
1027 INIT_LIST_HEAD(&policy->policy_list);
1028 init_rwsem(&policy->rwsem);
1029 spin_lock_init(&policy->transition_lock);
1030 init_waitqueue_head(&policy->transition_wait);
1031 init_completion(&policy->kobj_unregister);
1032 INIT_WORK(&policy->update, handle_update);
1037 free_cpumask_var(policy->cpus);
1044 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1046 struct kobject *kobj;
1047 struct completion *cmp;
1049 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1050 CPUFREQ_REMOVE_POLICY, policy);
1052 down_read(&policy->rwsem);
1053 kobj = &policy->kobj;
1054 cmp = &policy->kobj_unregister;
1055 up_read(&policy->rwsem);
1059 * We need to make sure that the underlying kobj is
1060 * actually not referenced anymore by anybody before we
1061 * proceed with unloading.
1063 pr_debug("waiting for dropping of refcount\n");
1064 wait_for_completion(cmp);
1065 pr_debug("wait complete\n");
1068 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1070 free_cpumask_var(policy->related_cpus);
1071 free_cpumask_var(policy->cpus);
1075 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1076 struct device *cpu_dev)
1080 if (WARN_ON(cpu == policy->cpu))
1083 /* Move kobject to the new policy->cpu */
1084 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1086 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1090 down_write(&policy->rwsem);
1092 up_write(&policy->rwsem);
1097 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1099 unsigned int j, cpu = dev->id;
1101 struct cpufreq_policy *policy;
1102 unsigned long flags;
1103 bool recover_policy = cpufreq_suspended;
1105 if (cpu_is_offline(cpu))
1108 pr_debug("adding CPU %u\n", cpu);
1110 /* check whether a different CPU already registered this
1111 * CPU because it is in the same boat. */
1112 policy = cpufreq_cpu_get_raw(cpu);
1113 if (unlikely(policy))
1116 if (!down_read_trylock(&cpufreq_rwsem))
1119 /* Check if this cpu was hot-unplugged earlier and has siblings */
1120 read_lock_irqsave(&cpufreq_driver_lock, flags);
1121 for_each_policy(policy) {
1122 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
1123 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1124 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1125 up_read(&cpufreq_rwsem);
1129 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1132 * Restore the saved policy when doing light-weight init and fall back
1133 * to the full init if that fails.
1135 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1137 recover_policy = false;
1138 policy = cpufreq_policy_alloc();
1144 * In the resume path, since we restore a saved policy, the assignment
1145 * to policy->cpu is like an update of the existing policy, rather than
1146 * the creation of a brand new one. So we need to perform this update
1147 * by invoking update_policy_cpu().
1149 if (recover_policy && cpu != policy->cpu)
1150 WARN_ON(update_policy_cpu(policy, cpu, dev));
1154 cpumask_copy(policy->cpus, cpumask_of(cpu));
1156 /* call driver. From then on the cpufreq must be able
1157 * to accept all calls to ->verify and ->setpolicy for this CPU
1159 ret = cpufreq_driver->init(policy);
1161 pr_debug("initialization failed\n");
1162 goto err_set_policy_cpu;
1165 down_write(&policy->rwsem);
1167 /* related cpus should atleast have policy->cpus */
1168 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1171 * affected cpus must always be the one, which are online. We aren't
1172 * managing offline cpus here.
1174 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1176 if (!recover_policy) {
1177 policy->user_policy.min = policy->min;
1178 policy->user_policy.max = policy->max;
1180 /* prepare interface data */
1181 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1182 &dev->kobj, "cpufreq");
1184 pr_err("%s: failed to init policy->kobj: %d\n",
1186 goto err_init_policy_kobj;
1190 write_lock_irqsave(&cpufreq_driver_lock, flags);
1191 for_each_cpu(j, policy->cpus)
1192 per_cpu(cpufreq_cpu_data, j) = policy;
1193 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1195 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1196 policy->cur = cpufreq_driver->get(policy->cpu);
1198 pr_err("%s: ->get() failed\n", __func__);
1204 * Sometimes boot loaders set CPU frequency to a value outside of
1205 * frequency table present with cpufreq core. In such cases CPU might be
1206 * unstable if it has to run on that frequency for long duration of time
1207 * and so its better to set it to a frequency which is specified in
1208 * freq-table. This also makes cpufreq stats inconsistent as
1209 * cpufreq-stats would fail to register because current frequency of CPU
1210 * isn't found in freq-table.
1212 * Because we don't want this change to effect boot process badly, we go
1213 * for the next freq which is >= policy->cur ('cur' must be set by now,
1214 * otherwise we will end up setting freq to lowest of the table as 'cur'
1215 * is initialized to zero).
1217 * We are passing target-freq as "policy->cur - 1" otherwise
1218 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1219 * equal to target-freq.
1221 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1223 /* Are we running at unknown frequency ? */
1224 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1225 if (ret == -EINVAL) {
1226 /* Warn user and fix it */
1227 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1228 __func__, policy->cpu, policy->cur);
1229 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1230 CPUFREQ_RELATION_L);
1233 * Reaching here after boot in a few seconds may not
1234 * mean that system will remain stable at "unknown"
1235 * frequency for longer duration. Hence, a BUG_ON().
1238 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1239 __func__, policy->cpu, policy->cur);
1243 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1244 CPUFREQ_START, policy);
1246 if (!recover_policy) {
1247 ret = cpufreq_add_dev_interface(policy, dev);
1249 goto err_out_unregister;
1250 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1251 CPUFREQ_CREATE_POLICY, policy);
1254 write_lock_irqsave(&cpufreq_driver_lock, flags);
1255 list_add(&policy->policy_list, &cpufreq_policy_list);
1256 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1258 cpufreq_init_policy(policy);
1260 if (!recover_policy) {
1261 policy->user_policy.policy = policy->policy;
1262 policy->user_policy.governor = policy->governor;
1264 up_write(&policy->rwsem);
1266 kobject_uevent(&policy->kobj, KOBJ_ADD);
1268 up_read(&cpufreq_rwsem);
1270 /* Callback for handling stuff after policy is ready */
1271 if (cpufreq_driver->ready)
1272 cpufreq_driver->ready(policy);
1274 pr_debug("initialization complete\n");
1280 write_lock_irqsave(&cpufreq_driver_lock, flags);
1281 for_each_cpu(j, policy->cpus)
1282 per_cpu(cpufreq_cpu_data, j) = NULL;
1283 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1285 if (!recover_policy) {
1286 kobject_put(&policy->kobj);
1287 wait_for_completion(&policy->kobj_unregister);
1289 err_init_policy_kobj:
1290 up_write(&policy->rwsem);
1292 if (cpufreq_driver->exit)
1293 cpufreq_driver->exit(policy);
1295 if (recover_policy) {
1296 /* Do not leave stale fallback data behind. */
1297 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1298 cpufreq_policy_put_kobj(policy);
1300 cpufreq_policy_free(policy);
1303 up_read(&cpufreq_rwsem);
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* Thin wrapper: all real work happens in __cpufreq_add_dev(). */
	return __cpufreq_add_dev(dev, sif);
}
1322 static int __cpufreq_remove_dev_prepare(struct device *dev,
1323 struct subsys_interface *sif)
1325 unsigned int cpu = dev->id, cpus;
1327 unsigned long flags;
1328 struct cpufreq_policy *policy;
1330 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1332 write_lock_irqsave(&cpufreq_driver_lock, flags);
1334 policy = per_cpu(cpufreq_cpu_data, cpu);
1336 /* Save the policy somewhere when doing a light-weight tear-down */
1337 if (cpufreq_suspended)
1338 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1340 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1343 pr_debug("%s: No cpu_data found\n", __func__);
1348 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1350 pr_err("%s: Failed to stop governor\n", __func__);
1354 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1355 policy->governor->name, CPUFREQ_NAME_LEN);
1358 down_read(&policy->rwsem);
1359 cpus = cpumask_weight(policy->cpus);
1360 up_read(&policy->rwsem);
1362 if (cpu != policy->cpu) {
1363 sysfs_remove_link(&dev->kobj, "cpufreq");
1364 } else if (cpus > 1) {
1365 /* Nominate new CPU */
1366 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1367 struct device *cpu_dev = get_cpu_device(new_cpu);
1369 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1370 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1372 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1374 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1375 __func__, cpu_dev->id);
1379 if (!cpufreq_suspended)
1380 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1381 __func__, new_cpu, cpu);
1382 } else if (cpufreq_driver->stop_cpu) {
1383 cpufreq_driver->stop_cpu(policy);
1389 static int __cpufreq_remove_dev_finish(struct device *dev,
1390 struct subsys_interface *sif)
1392 unsigned int cpu = dev->id, cpus;
1394 unsigned long flags;
1395 struct cpufreq_policy *policy;
1397 write_lock_irqsave(&cpufreq_driver_lock, flags);
1398 policy = per_cpu(cpufreq_cpu_data, cpu);
1399 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1400 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1403 pr_debug("%s: No cpu_data found\n", __func__);
1407 down_write(&policy->rwsem);
1408 cpus = cpumask_weight(policy->cpus);
1411 cpumask_clear_cpu(cpu, policy->cpus);
1412 up_write(&policy->rwsem);
1414 /* If cpu is last user of policy, free policy */
1417 ret = __cpufreq_governor(policy,
1418 CPUFREQ_GOV_POLICY_EXIT);
1420 pr_err("%s: Failed to exit governor\n",
1426 if (!cpufreq_suspended)
1427 cpufreq_policy_put_kobj(policy);
1430 * Perform the ->exit() even during light-weight tear-down,
1431 * since this is a core component, and is essential for the
1432 * subsequent light-weight ->init() to succeed.
1434 if (cpufreq_driver->exit)
1435 cpufreq_driver->exit(policy);
1437 /* Remove policy from list of active policies */
1438 write_lock_irqsave(&cpufreq_driver_lock, flags);
1439 list_del(&policy->policy_list);
1440 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1442 if (!cpufreq_suspended)
1443 cpufreq_policy_free(policy);
1444 } else if (has_target()) {
1445 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1447 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1450 pr_err("%s: Failed to start governor\n", __func__);
1459 * cpufreq_remove_dev - remove a CPU device
1461 * Removes the cpufreq interface for a CPU device.
1463 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1465 unsigned int cpu = dev->id;
1468 if (cpu_is_offline(cpu))
1471 ret = __cpufreq_remove_dev_prepare(dev, sif);
1474 ret = __cpufreq_remove_dev_finish(dev, sif);
1479 static void handle_update(struct work_struct *work)
1481 struct cpufreq_policy *policy =
1482 container_of(work, struct cpufreq_policy, update);
1483 unsigned int cpu = policy->cpu;
1484 pr_debug("handle_update for cpu %u called\n", cpu);
1485 cpufreq_update_policy(cpu);
1489 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1491 * @policy: policy managing CPUs
1492 * @new_freq: CPU frequency the CPU actually runs at
1494 * We adjust to current frequency first, and need to clean up later.
1495 * So either call to cpufreq_update_policy() or schedule handle_update()).
1497 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1498 unsigned int new_freq)
1500 struct cpufreq_freqs freqs;
1502 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1503 policy->cur, new_freq);
1505 freqs.old = policy->cur;
1506 freqs.new = new_freq;
1508 cpufreq_freq_transition_begin(policy, &freqs);
1509 cpufreq_freq_transition_end(policy, &freqs, 0);
1513 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1516 * This is the last known freq, without actually getting it from the driver.
1517 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1519 unsigned int cpufreq_quick_get(unsigned int cpu)
1521 struct cpufreq_policy *policy;
1522 unsigned int ret_freq = 0;
1524 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1525 return cpufreq_driver->get(cpu);
1527 policy = cpufreq_cpu_get(cpu);
1529 ret_freq = policy->cur;
1530 cpufreq_cpu_put(policy);
1535 EXPORT_SYMBOL(cpufreq_quick_get);
1538 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1541 * Just return the max possible frequency for a given CPU.
1543 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1545 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1546 unsigned int ret_freq = 0;
1549 ret_freq = policy->max;
1550 cpufreq_cpu_put(policy);
1555 EXPORT_SYMBOL(cpufreq_quick_get_max);
1557 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1559 unsigned int ret_freq = 0;
1561 if (!cpufreq_driver->get)
1564 ret_freq = cpufreq_driver->get(policy->cpu);
1566 if (ret_freq && policy->cur &&
1567 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1568 /* verify no discrepancy between actual and
1569 saved value exists */
1570 if (unlikely(ret_freq != policy->cur)) {
1571 cpufreq_out_of_sync(policy, ret_freq);
1572 schedule_work(&policy->update);
1580 * cpufreq_get - get the current CPU frequency (in kHz)
1583 * Get the CPU current (static) CPU frequency
1585 unsigned int cpufreq_get(unsigned int cpu)
1587 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1588 unsigned int ret_freq = 0;
1591 down_read(&policy->rwsem);
1592 ret_freq = __cpufreq_get(policy);
1593 up_read(&policy->rwsem);
1595 cpufreq_cpu_put(policy);
1600 EXPORT_SYMBOL(cpufreq_get);
1602 static struct subsys_interface cpufreq_interface = {
1604 .subsys = &cpu_subsys,
1605 .add_dev = cpufreq_add_dev,
1606 .remove_dev = cpufreq_remove_dev,
1610 * In case platform wants some specific frequency to be configured
1613 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1617 if (!policy->suspend_freq) {
1618 pr_err("%s: suspend_freq can't be zero\n", __func__);
1622 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1623 policy->suspend_freq);
1625 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1626 CPUFREQ_RELATION_H);
1628 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1629 __func__, policy->suspend_freq, ret);
1633 EXPORT_SYMBOL(cpufreq_generic_suspend);
1636 * cpufreq_suspend() - Suspend CPUFreq governors
1638 * Called during system wide Suspend/Hibernate cycles for suspending governors
1639 * as some platforms can't change frequency after this point in suspend cycle.
1640 * Because some of the devices (like: i2c, regulators, etc) they use for
1641 * changing frequency are suspended quickly after this point.
1643 void cpufreq_suspend(void)
1645 struct cpufreq_policy *policy;
1647 if (!cpufreq_driver)
1653 pr_debug("%s: Suspending Governors\n", __func__);
1655 for_each_policy(policy) {
1656 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1657 pr_err("%s: Failed to stop governor for policy: %p\n",
1659 else if (cpufreq_driver->suspend
1660 && cpufreq_driver->suspend(policy))
1661 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1666 cpufreq_suspended = true;
1670 * cpufreq_resume() - Resume CPUFreq governors
1672 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1673 * are suspended with cpufreq_suspend().
1675 void cpufreq_resume(void)
1677 struct cpufreq_policy *policy;
1679 if (!cpufreq_driver)
1682 cpufreq_suspended = false;
1687 pr_debug("%s: Resuming Governors\n", __func__);
1689 for_each_policy(policy) {
1690 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1691 pr_err("%s: Failed to resume driver: %p\n", __func__,
1693 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1694 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1695 pr_err("%s: Failed to start governor for policy: %p\n",
1699 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
1700 * policy in list. It will verify that the current freq is in
1701 * sync with what we believe it to be.
1703 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1704 schedule_work(&policy->update);
1709 * cpufreq_get_current_driver - return current driver's name
1711 * Return the name string of the currently loaded cpufreq driver
1714 const char *cpufreq_get_current_driver(void)
1717 return cpufreq_driver->name;
1721 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1724 * cpufreq_get_driver_data - return current driver data
1726 * Return the private data of the currently loaded cpufreq
1727 * driver, or NULL if no cpufreq driver is loaded.
1729 void *cpufreq_get_driver_data(void)
1732 return cpufreq_driver->driver_data;
1736 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1738 /*********************************************************************
1739 * NOTIFIER LISTS INTERFACE *
1740 *********************************************************************/
1743 * cpufreq_register_notifier - register a driver with cpufreq
1744 * @nb: notifier function to register
1745 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1747 * Add a driver to one of two lists: either a list of drivers that
1748 * are notified about clock rate changes (once before and once after
1749 * the transition), or a list of drivers that are notified about
1750 * changes in cpufreq policy.
1752 * This function may sleep, and has the same return conditions as
1753 * blocking_notifier_chain_register.
1755 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1759 if (cpufreq_disabled())
1762 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1765 case CPUFREQ_TRANSITION_NOTIFIER:
1766 ret = srcu_notifier_chain_register(
1767 &cpufreq_transition_notifier_list, nb);
1769 case CPUFREQ_POLICY_NOTIFIER:
1770 ret = blocking_notifier_chain_register(
1771 &cpufreq_policy_notifier_list, nb);
1779 EXPORT_SYMBOL(cpufreq_register_notifier);
1782 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1783 * @nb: notifier block to be unregistered
1784 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1786 * Remove a driver from the CPU frequency notifier list.
1788 * This function may sleep, and has the same return conditions as
1789 * blocking_notifier_chain_unregister.
1791 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1795 if (cpufreq_disabled())
1799 case CPUFREQ_TRANSITION_NOTIFIER:
1800 ret = srcu_notifier_chain_unregister(
1801 &cpufreq_transition_notifier_list, nb);
1803 case CPUFREQ_POLICY_NOTIFIER:
1804 ret = blocking_notifier_chain_unregister(
1805 &cpufreq_policy_notifier_list, nb);
1813 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1816 /*********************************************************************
1818 *********************************************************************/
1820 /* Must set freqs->new to intermediate frequency */
1821 static int __target_intermediate(struct cpufreq_policy *policy,
1822 struct cpufreq_freqs *freqs, int index)
1826 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1828 /* We don't need to switch to intermediate freq */
1832 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1833 __func__, policy->cpu, freqs->old, freqs->new);
1835 cpufreq_freq_transition_begin(policy, freqs);
1836 ret = cpufreq_driver->target_intermediate(policy, index);
1837 cpufreq_freq_transition_end(policy, freqs, ret);
1840 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1846 static int __target_index(struct cpufreq_policy *policy,
1847 struct cpufreq_frequency_table *freq_table, int index)
1849 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1850 unsigned int intermediate_freq = 0;
1851 int retval = -EINVAL;
1854 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1856 /* Handle switching to intermediate frequency */
1857 if (cpufreq_driver->get_intermediate) {
1858 retval = __target_intermediate(policy, &freqs, index);
1862 intermediate_freq = freqs.new;
1863 /* Set old freq to intermediate */
1864 if (intermediate_freq)
1865 freqs.old = freqs.new;
1868 freqs.new = freq_table[index].frequency;
1869 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1870 __func__, policy->cpu, freqs.old, freqs.new);
1872 cpufreq_freq_transition_begin(policy, &freqs);
1875 retval = cpufreq_driver->target_index(policy, index);
1877 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1881 cpufreq_freq_transition_end(policy, &freqs, retval);
1884 * Failed after setting to intermediate freq? Driver should have
1885 * reverted back to initial frequency and so should we. Check
1886 * here for intermediate_freq instead of get_intermediate, in
1887 * case we have't switched to intermediate freq at all.
1889 if (unlikely(retval && intermediate_freq)) {
1890 freqs.old = intermediate_freq;
1891 freqs.new = policy->restore_freq;
1892 cpufreq_freq_transition_begin(policy, &freqs);
1893 cpufreq_freq_transition_end(policy, &freqs, 0);
1900 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1901 unsigned int target_freq,
1902 unsigned int relation)
1904 unsigned int old_target_freq = target_freq;
1905 int retval = -EINVAL;
1907 if (cpufreq_disabled())
1910 /* Make sure that target_freq is within supported range */
1911 if (target_freq > policy->max)
1912 target_freq = policy->max;
1913 if (target_freq < policy->min)
1914 target_freq = policy->min;
1916 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1917 policy->cpu, target_freq, relation, old_target_freq);
1920 * This might look like a redundant call as we are checking it again
1921 * after finding index. But it is left intentionally for cases where
1922 * exactly same freq is called again and so we can save on few function
1925 if (target_freq == policy->cur)
1928 /* Save last value to restore later on errors */
1929 policy->restore_freq = policy->cur;
1931 if (cpufreq_driver->target)
1932 retval = cpufreq_driver->target(policy, target_freq, relation);
1933 else if (cpufreq_driver->target_index) {
1934 struct cpufreq_frequency_table *freq_table;
1937 freq_table = cpufreq_frequency_get_table(policy->cpu);
1938 if (unlikely(!freq_table)) {
1939 pr_err("%s: Unable to find freq_table\n", __func__);
1943 retval = cpufreq_frequency_table_target(policy, freq_table,
1944 target_freq, relation, &index);
1945 if (unlikely(retval)) {
1946 pr_err("%s: Unable to find matching freq\n", __func__);
1950 if (freq_table[index].frequency == policy->cur) {
1955 retval = __target_index(policy, freq_table, index);
1961 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1963 int cpufreq_driver_target(struct cpufreq_policy *policy,
1964 unsigned int target_freq,
1965 unsigned int relation)
1969 down_write(&policy->rwsem);
1971 ret = __cpufreq_driver_target(policy, target_freq, relation);
1973 up_write(&policy->rwsem);
1977 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1979 static int __cpufreq_governor(struct cpufreq_policy *policy,
1984 /* Only must be defined when default governor is known to have latency
1985 restrictions, like e.g. conservative or ondemand.
1986 That this is the case is already ensured in Kconfig
1988 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1989 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1991 struct cpufreq_governor *gov = NULL;
1994 /* Don't start any governor operations if we are entering suspend */
1995 if (cpufreq_suspended)
1998 * Governor might not be initiated here if ACPI _PPC changed
1999 * notification happened, so check it.
2001 if (!policy->governor)
2004 if (policy->governor->max_transition_latency &&
2005 policy->cpuinfo.transition_latency >
2006 policy->governor->max_transition_latency) {
2010 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2011 policy->governor->name, gov->name);
2012 policy->governor = gov;
2016 if (event == CPUFREQ_GOV_POLICY_INIT)
2017 if (!try_module_get(policy->governor->owner))
2020 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2021 policy->cpu, event);
2023 mutex_lock(&cpufreq_governor_lock);
2024 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2025 || (!policy->governor_enabled
2026 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2027 mutex_unlock(&cpufreq_governor_lock);
2031 if (event == CPUFREQ_GOV_STOP)
2032 policy->governor_enabled = false;
2033 else if (event == CPUFREQ_GOV_START)
2034 policy->governor_enabled = true;
2036 mutex_unlock(&cpufreq_governor_lock);
2038 ret = policy->governor->governor(policy, event);
2041 if (event == CPUFREQ_GOV_POLICY_INIT)
2042 policy->governor->initialized++;
2043 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2044 policy->governor->initialized--;
2046 /* Restore original values */
2047 mutex_lock(&cpufreq_governor_lock);
2048 if (event == CPUFREQ_GOV_STOP)
2049 policy->governor_enabled = true;
2050 else if (event == CPUFREQ_GOV_START)
2051 policy->governor_enabled = false;
2052 mutex_unlock(&cpufreq_governor_lock);
2055 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2056 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2057 module_put(policy->governor->owner);
2062 int cpufreq_register_governor(struct cpufreq_governor *governor)
2069 if (cpufreq_disabled())
2072 mutex_lock(&cpufreq_governor_mutex);
2074 governor->initialized = 0;
2076 if (!find_governor(governor->name)) {
2078 list_add(&governor->governor_list, &cpufreq_governor_list);
2081 mutex_unlock(&cpufreq_governor_mutex);
2084 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2086 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2093 if (cpufreq_disabled())
2096 for_each_present_cpu(cpu) {
2097 if (cpu_online(cpu))
2099 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2100 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2103 mutex_lock(&cpufreq_governor_mutex);
2104 list_del(&governor->governor_list);
2105 mutex_unlock(&cpufreq_governor_mutex);
2108 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2111 /*********************************************************************
2112 * POLICY INTERFACE *
2113 *********************************************************************/
2116 * cpufreq_get_policy - get the current cpufreq_policy
2117 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2120 * Reads the current cpufreq policy.
2122 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2124 struct cpufreq_policy *cpu_policy;
2128 cpu_policy = cpufreq_cpu_get(cpu);
2132 memcpy(policy, cpu_policy, sizeof(*policy));
2134 cpufreq_cpu_put(cpu_policy);
2137 EXPORT_SYMBOL(cpufreq_get_policy);
2140 * policy : current policy.
2141 * new_policy: policy to be set.
2143 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2144 struct cpufreq_policy *new_policy)
2146 struct cpufreq_governor *old_gov;
2149 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2150 new_policy->cpu, new_policy->min, new_policy->max);
2152 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2154 if (new_policy->min > policy->max || new_policy->max < policy->min)
2157 /* verify the cpu speed can be set within this limit */
2158 ret = cpufreq_driver->verify(new_policy);
2162 /* adjust if necessary - all reasons */
2163 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2164 CPUFREQ_ADJUST, new_policy);
2166 /* adjust if necessary - hardware incompatibility*/
2167 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2168 CPUFREQ_INCOMPATIBLE, new_policy);
2171 * verify the cpu speed can be set within this limit, which might be
2172 * different to the first one
2174 ret = cpufreq_driver->verify(new_policy);
2178 /* notification of the new policy */
2179 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2180 CPUFREQ_NOTIFY, new_policy);
2182 policy->min = new_policy->min;
2183 policy->max = new_policy->max;
2185 pr_debug("new min and max freqs are %u - %u kHz\n",
2186 policy->min, policy->max);
2188 if (cpufreq_driver->setpolicy) {
2189 policy->policy = new_policy->policy;
2190 pr_debug("setting range\n");
2191 return cpufreq_driver->setpolicy(new_policy);
2194 if (new_policy->governor == policy->governor)
2197 pr_debug("governor switch\n");
2199 /* save old, working values */
2200 old_gov = policy->governor;
2201 /* end old governor */
2203 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2204 up_write(&policy->rwsem);
2205 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2206 down_write(&policy->rwsem);
2209 /* start new governor */
2210 policy->governor = new_policy->governor;
2211 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2212 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2215 up_write(&policy->rwsem);
2216 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2217 down_write(&policy->rwsem);
2220 /* new governor failed, so re-start old one */
2221 pr_debug("starting governor %s failed\n", policy->governor->name);
2223 policy->governor = old_gov;
2224 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2225 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2231 pr_debug("governor: change or update limits\n");
2232 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2236 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2237 * @cpu: CPU which shall be re-evaluated
2239 * Useful for policy notifiers which have different necessities
2240 * at different times.
2242 int cpufreq_update_policy(unsigned int cpu)
2244 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2245 struct cpufreq_policy new_policy;
2251 down_write(&policy->rwsem);
2253 pr_debug("updating policy for CPU %u\n", cpu);
2254 memcpy(&new_policy, policy, sizeof(*policy));
2255 new_policy.min = policy->user_policy.min;
2256 new_policy.max = policy->user_policy.max;
2257 new_policy.policy = policy->user_policy.policy;
2258 new_policy.governor = policy->user_policy.governor;
2261 * BIOS might change freq behind our back
2262 * -> ask driver for current freq and notify governors about a change
2264 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2265 new_policy.cur = cpufreq_driver->get(cpu);
2266 if (WARN_ON(!new_policy.cur)) {
2272 pr_debug("Driver did not initialize current freq\n");
2273 policy->cur = new_policy.cur;
2275 if (policy->cur != new_policy.cur && has_target())
2276 cpufreq_out_of_sync(policy, new_policy.cur);
2280 ret = cpufreq_set_policy(policy, &new_policy);
2283 up_write(&policy->rwsem);
2285 cpufreq_cpu_put(policy);
2288 EXPORT_SYMBOL(cpufreq_update_policy);
2290 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2291 unsigned long action, void *hcpu)
2293 unsigned int cpu = (unsigned long)hcpu;
2296 dev = get_cpu_device(cpu);
2298 switch (action & ~CPU_TASKS_FROZEN) {
2300 __cpufreq_add_dev(dev, NULL);
2303 case CPU_DOWN_PREPARE:
2304 __cpufreq_remove_dev_prepare(dev, NULL);
2308 __cpufreq_remove_dev_finish(dev, NULL);
2311 case CPU_DOWN_FAILED:
2312 __cpufreq_add_dev(dev, NULL);
2319 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2320 .notifier_call = cpufreq_cpu_callback,
2323 /*********************************************************************
2325 *********************************************************************/
2326 static int cpufreq_boost_set_sw(int state)
2328 struct cpufreq_frequency_table *freq_table;
2329 struct cpufreq_policy *policy;
2332 for_each_policy(policy) {
2333 freq_table = cpufreq_frequency_get_table(policy->cpu);
2335 ret = cpufreq_frequency_table_cpuinfo(policy,
2338 pr_err("%s: Policy frequency update failed\n",
2342 policy->user_policy.max = policy->max;
2343 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2350 int cpufreq_boost_trigger_state(int state)
2352 unsigned long flags;
2355 if (cpufreq_driver->boost_enabled == state)
2358 write_lock_irqsave(&cpufreq_driver_lock, flags);
2359 cpufreq_driver->boost_enabled = state;
2360 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2362 ret = cpufreq_driver->set_boost(state);
2364 write_lock_irqsave(&cpufreq_driver_lock, flags);
2365 cpufreq_driver->boost_enabled = !state;
2366 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2368 pr_err("%s: Cannot %s BOOST\n",
2369 __func__, state ? "enable" : "disable");
2375 int cpufreq_boost_supported(void)
2377 if (likely(cpufreq_driver))
2378 return cpufreq_driver->boost_supported;
2382 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2384 int cpufreq_boost_enabled(void)
2386 return cpufreq_driver->boost_enabled;
2388 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2390 /*********************************************************************
2391 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2392 *********************************************************************/
2395 * cpufreq_register_driver - register a CPU Frequency driver
2396 * @driver_data: A struct cpufreq_driver containing the values#
2397 * submitted by the CPU Frequency driver.
2399 * Registers a CPU Frequency driver to this core code. This code
2400 * returns zero on success, -EBUSY when another driver got here first
2401 * (and isn't unregistered in the meantime).
2404 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2406 unsigned long flags;
2409 if (cpufreq_disabled())
2412 if (!driver_data || !driver_data->verify || !driver_data->init ||
2413 !(driver_data->setpolicy || driver_data->target_index ||
2414 driver_data->target) ||
2415 (driver_data->setpolicy && (driver_data->target_index ||
2416 driver_data->target)) ||
2417 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2420 pr_debug("trying to register driver %s\n", driver_data->name);
2422 write_lock_irqsave(&cpufreq_driver_lock, flags);
2423 if (cpufreq_driver) {
2424 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2427 cpufreq_driver = driver_data;
2428 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2430 if (driver_data->setpolicy)
2431 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2433 if (cpufreq_boost_supported()) {
2435 * Check if driver provides function to enable boost -
2436 * if not, use cpufreq_boost_set_sw as default
2438 if (!cpufreq_driver->set_boost)
2439 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2441 ret = cpufreq_sysfs_create_file(&boost.attr);
2443 pr_err("%s: cannot register global BOOST sysfs file\n",
2445 goto err_null_driver;
2449 ret = subsys_interface_register(&cpufreq_interface);
2451 goto err_boost_unreg;
2453 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2454 list_empty(&cpufreq_policy_list)) {
2455 /* if all ->init() calls failed, unregister */
2456 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2461 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2462 pr_debug("driver %s up and running\n", driver_data->name);
2466 subsys_interface_unregister(&cpufreq_interface);
2468 if (cpufreq_boost_supported())
2469 cpufreq_sysfs_remove_file(&boost.attr);
2471 write_lock_irqsave(&cpufreq_driver_lock, flags);
2472 cpufreq_driver = NULL;
2473 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2476 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2479 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2481 * Unregister the current CPUFreq driver. Only call this if you have
2482 * the right to do so, i.e. if you have succeeded in initialising before!
2483 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2484 * currently not initialised.
2486 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2488 unsigned long flags;
2490 if (!cpufreq_driver || (driver != cpufreq_driver))
2493 pr_debug("unregistering driver %s\n", driver->name);
2495 subsys_interface_unregister(&cpufreq_interface);
2496 if (cpufreq_boost_supported())
2497 cpufreq_sysfs_remove_file(&boost.attr);
2499 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2501 down_write(&cpufreq_rwsem);
2502 write_lock_irqsave(&cpufreq_driver_lock, flags);
2504 cpufreq_driver = NULL;
2506 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2507 up_write(&cpufreq_rwsem);
2511 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2514 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2515 * or mutexes when secondary CPUs are halted.
2517 static struct syscore_ops cpufreq_syscore_ops = {
2518 .shutdown = cpufreq_suspend,
2521 static int __init cpufreq_core_init(void)
2523 if (cpufreq_disabled())
2526 cpufreq_global_kobject = kobject_create();
2527 BUG_ON(!cpufreq_global_kobject);
2529 register_syscore_ops(&cpufreq_syscore_ops);
2533 core_initcall(cpufreq_core_init);