2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
50 static inline bool has_target(void)
52 return cpufreq_driver->target_index || cpufreq_driver->target;
56 * rwsem to guarantee that the cpufreq driver module doesn't unload during a critical section
59 static DECLARE_RWSEM(cpufreq_rwsem);
61 /* internal prototypes */
62 static int __cpufreq_governor(struct cpufreq_policy *policy,
64 static unsigned int __cpufreq_get(unsigned int cpu);
65 static void handle_update(struct work_struct *work);
68 * Two notifier lists: the "policy" list is involved in the
69 * validation process for a new CPU frequency policy; the
70 * "transition" list for kernel code that needs to handle
71 * changes to devices when the CPU clock speed changes.
72 * The mutex locks both lists.
74 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
75 static struct srcu_notifier_head cpufreq_transition_notifier_list;
77 static bool init_cpufreq_transition_notifier_list_called;
78 static int __init init_cpufreq_transition_notifier_list(void)
80 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
81 init_cpufreq_transition_notifier_list_called = true;
84 pure_initcall(init_cpufreq_transition_notifier_list);
86 static int off __read_mostly;
87 static int cpufreq_disabled(void)
91 void disable_cpufreq(void)
95 static LIST_HEAD(cpufreq_governor_list);
96 static DEFINE_MUTEX(cpufreq_governor_mutex);
98 bool have_governor_per_policy(void)
100 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
102 EXPORT_SYMBOL_GPL(have_governor_per_policy);
104 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
106 if (have_governor_per_policy())
107 return &policy->kobj;
109 return cpufreq_global_kobject;
111 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
113 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
119 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
121 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
122 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
128 idle_time = cur_wall_time - busy_time;
130 *wall = cputime_to_usecs(cur_wall_time);
132 return cputime_to_usecs(idle_time);
135 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
137 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
139 if (idle_time == -1ULL)
140 return get_cpu_idle_time_jiffy(cpu, wall);
142 idle_time += get_cpu_iowait_time_us(cpu, wall);
146 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
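/*
 * Illustrative sketch, not part of this file: how a governor-style consumer
 * might use get_cpu_idle_time() to estimate load over a sampling window.
 * sample_cpu_load() is hypothetical; div64_u64() comes from <linux/math64.h>.
 */
static unsigned int sample_cpu_load(unsigned int cpu, u64 *prev_wall,
				    u64 *prev_idle, int io_busy)
{
	u64 cur_wall, cur_idle, wall_delta, idle_delta;

	cur_idle = get_cpu_idle_time(cpu, &cur_wall, io_busy);

	wall_delta = cur_wall - *prev_wall;
	idle_delta = cur_idle - *prev_idle;

	*prev_wall = cur_wall;
	*prev_idle = cur_idle;

	if (!wall_delta || wall_delta < idle_delta)
		return 0;

	/* busy time as a percentage of the sampling window */
	return div64_u64(100 * (wall_delta - idle_delta), wall_delta);
}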
149 * This is a generic cpufreq init() routine which can be used by cpufreq
150 * drivers of SMP systems. It will do the following:
151 * - validate & show the frequency table passed in
152 * - set the policy's transition latency
153 * - fill policy->cpus with all possible CPUs
155 int cpufreq_generic_init(struct cpufreq_policy *policy,
156 struct cpufreq_frequency_table *table,
157 unsigned int transition_latency)
161 ret = cpufreq_table_validate_and_show(policy, table);
163 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
167 policy->cpuinfo.transition_latency = transition_latency;
170 * The driver only supports the SMP configuration in which all processors
171 * share the same clock and voltage.
173 cpumask_setall(policy->cpus);
177 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
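/*
 * Illustrative sketch, not part of this file: a minimal driver ->init()
 * built on cpufreq_generic_init(). The foo_* names, the table entries and
 * the 300 us transition latency are hypothetical.
 */
static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 400000 },		/* kHz */
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validates/exposes the table and fills policy->cpus */
	return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}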
179 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
181 struct cpufreq_policy *policy = NULL;
184 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
187 if (!down_read_trylock(&cpufreq_rwsem))
190 /* get the cpufreq driver */
191 read_lock_irqsave(&cpufreq_driver_lock, flags);
193 if (cpufreq_driver) {
195 policy = per_cpu(cpufreq_cpu_data, cpu);
197 kobject_get(&policy->kobj);
200 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
203 up_read(&cpufreq_rwsem);
207 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
209 void cpufreq_cpu_put(struct cpufreq_policy *policy)
211 if (cpufreq_disabled())
214 kobject_put(&policy->kobj);
215 up_read(&cpufreq_rwsem);
217 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
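/*
 * Illustrative sketch, not part of this file: the get/put pattern expected
 * from callers of cpufreq_cpu_get(). example_read_cur_freq() is hypothetical.
 */
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (!policy)
		return 0;

	cur = policy->cur;
	cpufreq_cpu_put(policy);	/* drops the kobject ref and cpufreq_rwsem */

	return cur;
}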
219 /*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
224 * adjust_jiffies - adjust the system "loops_per_jiffy"
226 * This function alters the system "loops_per_jiffy" for the clock
227 * speed change. Note that loops_per_jiffy cannot be updated on SMP
228 * systems as each CPU might be scaled differently. So, use the arch
229 * per-CPU loops_per_jiffy value wherever possible.
232 static unsigned long l_p_j_ref;
233 static unsigned int l_p_j_ref_freq;
235 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
237 if (ci->flags & CPUFREQ_CONST_LOOPS)
240 if (!l_p_j_ref_freq) {
241 l_p_j_ref = loops_per_jiffy;
242 l_p_j_ref_freq = ci->old;
243 pr_debug("saving %lu as reference value for loops_per_jiffy; "
244 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
246 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
247 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
248 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
250 pr_debug("scaling loops_per_jiffy to %lu "
251 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
255 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
261 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
262 struct cpufreq_freqs *freqs, unsigned int state)
264 BUG_ON(irqs_disabled());
266 if (cpufreq_disabled())
269 freqs->flags = cpufreq_driver->flags;
270 pr_debug("notification %u of frequency transition to %u kHz\n",
275 case CPUFREQ_PRECHANGE:
276 /* detect if the driver reported a value as "old frequency"
277 * which is not equal to what the cpufreq core thinks is
280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
283 pr_debug("Warning: CPU frequency is"
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
290 CPUFREQ_PRECHANGE, freqs);
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
297 (unsigned long)freqs->cpu);
298 trace_cpu_frequency(freqs->new, freqs->cpu);
299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
300 CPUFREQ_POSTCHANGE, freqs);
301 if (likely(policy) && likely(policy->cpu == freqs->cpu))
302 policy->cur = freqs->new;
308 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
309 * on frequency transition.
311 * This function calls the transition notifiers and the "adjust_jiffies"
312 * function. It is called twice on all CPU frequency changes that have external effects.
315 void cpufreq_notify_transition(struct cpufreq_policy *policy,
316 struct cpufreq_freqs *freqs, unsigned int state)
318 for_each_cpu(freqs->cpu, policy->cpus)
319 __cpufreq_notify_transition(policy, freqs, state);
321 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
323 /* Do post notifications when there are chances that transition has failed */
324 void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
325 struct cpufreq_freqs *freqs, int transition_failed)
327 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
328 if (!transition_failed)
331 swap(freqs->old, freqs->new);
332 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
333 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
335 EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
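/*
 * Illustrative sketch, not part of this file: a driver that does its own
 * notification brackets the hardware switch with PRECHANGE/POSTCHANGE.
 * foo_hw_set_rate() and foo_switch_freq() are hypothetical.
 */
static int foo_hw_set_rate(unsigned int cpu, unsigned int khz)
{
	/* placeholder for the actual clock/regulator programming */
	return 0;
}

static int foo_switch_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	ret = foo_hw_set_rate(policy->cpu, new_freq);
	/* sends POSTCHANGE; on failure it also notifies the rollback */
	cpufreq_notify_post_transition(policy, &freqs, ret);

	return ret;
}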
338 /*********************************************************************
340 *********************************************************************/
342 static struct cpufreq_governor *__find_governor(const char *str_governor)
344 struct cpufreq_governor *t;
346 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
347 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
354 * cpufreq_parse_governor - parse a governor string
356 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
357 struct cpufreq_governor **governor)
364 if (cpufreq_driver->setpolicy) {
365 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
366 *policy = CPUFREQ_POLICY_PERFORMANCE;
368 } else if (!strnicmp(str_governor, "powersave",
370 *policy = CPUFREQ_POLICY_POWERSAVE;
373 } else if (has_target()) {
374 struct cpufreq_governor *t;
376 mutex_lock(&cpufreq_governor_mutex);
378 t = __find_governor(str_governor);
383 mutex_unlock(&cpufreq_governor_mutex);
384 ret = request_module("cpufreq_%s", str_governor);
385 mutex_lock(&cpufreq_governor_mutex);
388 t = __find_governor(str_governor);
396 mutex_unlock(&cpufreq_governor_mutex);
403 * cpufreq_per_cpu_attr_read() / show_##file_name() -
404 * print out cpufreq information
406 * Write out information from cpufreq_driver->policy[cpu]; object must be
410 #define show_one(file_name, object) \
411 static ssize_t show_##file_name \
412 (struct cpufreq_policy *policy, char *buf) \
414 return sprintf(buf, "%u\n", policy->object); \
417 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
418 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
419 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
420 show_one(scaling_min_freq, min);
421 show_one(scaling_max_freq, max);
422 show_one(scaling_cur_freq, cur);
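/*
 * Illustrative sketch, not part of this file: roughly what the show_one()
 * macro above expands to for scaling_cur_freq (guarded out so it does not
 * clash with the real macro-generated function).
 */
#if 0
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cur);
}
#endif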
424 static int cpufreq_set_policy(struct cpufreq_policy *policy,
425 struct cpufreq_policy *new_policy);
428 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
430 #define store_one(file_name, object) \
431 static ssize_t store_##file_name \
432 (struct cpufreq_policy *policy, const char *buf, size_t count) \
435 struct cpufreq_policy new_policy; \
437 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
441 ret = sscanf(buf, "%u", &new_policy.object); \
445 ret = cpufreq_set_policy(policy, &new_policy); \
446 policy->user_policy.object = policy->object; \
448 return ret ? ret : count; \
451 store_one(scaling_min_freq, min);
452 store_one(scaling_max_freq, max);
455 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
457 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
460 unsigned int cur_freq = __cpufreq_get(policy->cpu);
462 return sprintf(buf, "<unknown>");
463 return sprintf(buf, "%u\n", cur_freq);
467 * show_scaling_governor - show the current policy for the specified CPU
469 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
471 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
472 return sprintf(buf, "powersave\n");
473 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
474 return sprintf(buf, "performance\n");
475 else if (policy->governor)
476 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
477 policy->governor->name);
482 * store_scaling_governor - store policy for the specified CPU
484 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
485 const char *buf, size_t count)
488 char str_governor[16];
489 struct cpufreq_policy new_policy;
491 ret = cpufreq_get_policy(&new_policy, policy->cpu);
495 ret = sscanf(buf, "%15s", str_governor);
499 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
500 &new_policy.governor))
503 ret = cpufreq_set_policy(policy, &new_policy);
505 policy->user_policy.policy = policy->policy;
506 policy->user_policy.governor = policy->governor;
515 * show_scaling_driver - show the cpufreq driver currently loaded
517 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
519 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
523 * show_scaling_available_governors - show the available CPUfreq governors
525 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
529 struct cpufreq_governor *t;
532 i += sprintf(buf, "performance powersave");
536 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
537 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
538 - (CPUFREQ_NAME_LEN + 2)))
540 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
543 i += sprintf(&buf[i], "\n");
547 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
552 for_each_cpu(cpu, mask) {
554 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
555 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
556 if (i >= (PAGE_SIZE - 5))
559 i += sprintf(&buf[i], "\n");
562 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
565 * show_related_cpus - show the CPUs affected by each transition even if
566 * hw coordination is in use
568 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
570 return cpufreq_show_cpus(policy->related_cpus, buf);
574 * show_affected_cpus - show the CPUs affected by each transition
576 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
578 return cpufreq_show_cpus(policy->cpus, buf);
581 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
582 const char *buf, size_t count)
584 unsigned int freq = 0;
587 if (!policy->governor || !policy->governor->store_setspeed)
590 ret = sscanf(buf, "%u", &freq);
594 policy->governor->store_setspeed(policy, freq);
599 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
601 if (!policy->governor || !policy->governor->show_setspeed)
602 return sprintf(buf, "<unsupported>\n");
604 return policy->governor->show_setspeed(policy, buf);
608 * show_bios_limit - show the current cpufreq HW/BIOS limitation
610 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
614 if (cpufreq_driver->bios_limit) {
615 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
617 return sprintf(buf, "%u\n", limit);
619 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
622 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
623 cpufreq_freq_attr_ro(cpuinfo_min_freq);
624 cpufreq_freq_attr_ro(cpuinfo_max_freq);
625 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
626 cpufreq_freq_attr_ro(scaling_available_governors);
627 cpufreq_freq_attr_ro(scaling_driver);
628 cpufreq_freq_attr_ro(scaling_cur_freq);
629 cpufreq_freq_attr_ro(bios_limit);
630 cpufreq_freq_attr_ro(related_cpus);
631 cpufreq_freq_attr_ro(affected_cpus);
632 cpufreq_freq_attr_rw(scaling_min_freq);
633 cpufreq_freq_attr_rw(scaling_max_freq);
634 cpufreq_freq_attr_rw(scaling_governor);
635 cpufreq_freq_attr_rw(scaling_setspeed);
637 static struct attribute *default_attrs[] = {
638 &cpuinfo_min_freq.attr,
639 &cpuinfo_max_freq.attr,
640 &cpuinfo_transition_latency.attr,
641 &scaling_min_freq.attr,
642 &scaling_max_freq.attr,
645 &scaling_governor.attr,
646 &scaling_driver.attr,
647 &scaling_available_governors.attr,
648 &scaling_setspeed.attr,
652 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
653 #define to_attr(a) container_of(a, struct freq_attr, attr)
655 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
657 struct cpufreq_policy *policy = to_policy(kobj);
658 struct freq_attr *fattr = to_attr(attr);
661 if (!down_read_trylock(&cpufreq_rwsem))
664 down_read(&policy->rwsem);
667 ret = fattr->show(policy, buf);
671 up_read(&policy->rwsem);
672 up_read(&cpufreq_rwsem);
677 static ssize_t store(struct kobject *kobj, struct attribute *attr,
678 const char *buf, size_t count)
680 struct cpufreq_policy *policy = to_policy(kobj);
681 struct freq_attr *fattr = to_attr(attr);
682 ssize_t ret = -EINVAL;
686 if (!cpu_online(policy->cpu))
689 if (!down_read_trylock(&cpufreq_rwsem))
692 down_write(&policy->rwsem);
695 ret = fattr->store(policy, buf, count);
699 up_write(&policy->rwsem);
701 up_read(&cpufreq_rwsem);
708 static void cpufreq_sysfs_release(struct kobject *kobj)
710 struct cpufreq_policy *policy = to_policy(kobj);
711 pr_debug("last reference is dropped\n");
712 complete(&policy->kobj_unregister);
715 static const struct sysfs_ops sysfs_ops = {
720 static struct kobj_type ktype_cpufreq = {
721 .sysfs_ops = &sysfs_ops,
722 .default_attrs = default_attrs,
723 .release = cpufreq_sysfs_release,
726 struct kobject *cpufreq_global_kobject;
727 EXPORT_SYMBOL(cpufreq_global_kobject);
729 static int cpufreq_global_kobject_usage;
731 int cpufreq_get_global_kobject(void)
733 if (!cpufreq_global_kobject_usage++)
734 return kobject_add(cpufreq_global_kobject,
735 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
739 EXPORT_SYMBOL(cpufreq_get_global_kobject);
741 void cpufreq_put_global_kobject(void)
743 if (!--cpufreq_global_kobject_usage)
744 kobject_del(cpufreq_global_kobject);
746 EXPORT_SYMBOL(cpufreq_put_global_kobject);
748 int cpufreq_sysfs_create_file(const struct attribute *attr)
750 int ret = cpufreq_get_global_kobject();
753 ret = sysfs_create_file(cpufreq_global_kobject, attr);
755 cpufreq_put_global_kobject();
760 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
762 void cpufreq_sysfs_remove_file(const struct attribute *attr)
764 sysfs_remove_file(cpufreq_global_kobject, attr);
765 cpufreq_put_global_kobject();
767 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
769 /* symlink affected CPUs */
770 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
775 for_each_cpu(j, policy->cpus) {
776 struct device *cpu_dev;
778 if (j == policy->cpu)
781 pr_debug("Adding link for CPU: %u\n", j);
782 cpu_dev = get_cpu_device(j);
783 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
791 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
794 struct freq_attr **drv_attr;
797 /* prepare interface data */
798 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
799 &dev->kobj, "cpufreq");
803 /* set up files for this cpu device */
804 drv_attr = cpufreq_driver->attr;
805 while ((drv_attr) && (*drv_attr)) {
806 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
808 goto err_out_kobj_put;
811 if (cpufreq_driver->get) {
812 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
814 goto err_out_kobj_put;
817 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
819 goto err_out_kobj_put;
821 if (cpufreq_driver->bios_limit) {
822 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
824 goto err_out_kobj_put;
827 ret = cpufreq_add_dev_symlink(policy);
829 goto err_out_kobj_put;
834 kobject_put(&policy->kobj);
835 wait_for_completion(&policy->kobj_unregister);
839 static void cpufreq_init_policy(struct cpufreq_policy *policy)
841 struct cpufreq_policy new_policy;
844 memcpy(&new_policy, policy, sizeof(*policy));
846 /* Use the default policy if it is valid. */
847 if (cpufreq_driver->setpolicy)
848 cpufreq_parse_governor(policy->governor->name,
849 &new_policy.policy, NULL);
851 /* assure that the starting sequence is run in cpufreq_set_policy */
852 policy->governor = NULL;
854 /* set default policy */
855 ret = cpufreq_set_policy(policy, &new_policy);
857 pr_debug("setting policy failed\n");
858 if (cpufreq_driver->exit)
859 cpufreq_driver->exit(policy);
863 #ifdef CONFIG_HOTPLUG_CPU
864 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
865 unsigned int cpu, struct device *dev)
871 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
873 pr_err("%s: Failed to stop governor\n", __func__);
878 down_write(&policy->rwsem);
880 write_lock_irqsave(&cpufreq_driver_lock, flags);
882 cpumask_set_cpu(cpu, policy->cpus);
883 per_cpu(cpufreq_cpu_data, cpu) = policy;
884 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
886 up_write(&policy->rwsem);
889 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
890 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
891 pr_err("%s: Failed to start governor\n", __func__);
896 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
900 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
902 struct cpufreq_policy *policy;
905 read_lock_irqsave(&cpufreq_driver_lock, flags);
907 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
909 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
914 static struct cpufreq_policy *cpufreq_policy_alloc(void)
916 struct cpufreq_policy *policy;
918 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
922 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
923 goto err_free_policy;
925 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
926 goto err_free_cpumask;
928 INIT_LIST_HEAD(&policy->policy_list);
929 init_rwsem(&policy->rwsem);
934 free_cpumask_var(policy->cpus);
941 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
943 struct kobject *kobj;
944 struct completion *cmp;
946 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
947 CPUFREQ_REMOVE_POLICY, policy);
949 down_read(&policy->rwsem);
950 kobj = &policy->kobj;
951 cmp = &policy->kobj_unregister;
952 up_read(&policy->rwsem);
956 * We need to make sure that the underlying kobj is
957 * actually not referenced anymore by anybody before we
958 * proceed with unloading.
960 pr_debug("waiting for dropping of refcount\n");
961 wait_for_completion(cmp);
962 pr_debug("wait complete\n");
965 static void cpufreq_policy_free(struct cpufreq_policy *policy)
967 free_cpumask_var(policy->related_cpus);
968 free_cpumask_var(policy->cpus);
972 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
974 if (WARN_ON(cpu == policy->cpu))
977 down_write(&policy->rwsem);
979 policy->last_cpu = policy->cpu;
982 up_write(&policy->rwsem);
984 cpufreq_frequency_table_update_policy_cpu(policy);
985 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
986 CPUFREQ_UPDATE_POLICY_CPU, policy);
989 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
992 unsigned int j, cpu = dev->id;
994 struct cpufreq_policy *policy;
996 #ifdef CONFIG_HOTPLUG_CPU
997 struct cpufreq_policy *tpolicy;
998 struct cpufreq_governor *gov;
1001 if (cpu_is_offline(cpu))
1004 pr_debug("adding CPU %u\n", cpu);
1007 /* check whether a different CPU already registered this
1008 * CPU because it is in the same boat. */
1009 policy = cpufreq_cpu_get(cpu);
1010 if (unlikely(policy)) {
1011 cpufreq_cpu_put(policy);
1016 if (!down_read_trylock(&cpufreq_rwsem))
1019 #ifdef CONFIG_HOTPLUG_CPU
1020 /* Check if this cpu was hot-unplugged earlier and has siblings */
1021 read_lock_irqsave(&cpufreq_driver_lock, flags);
1022 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1023 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1024 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1025 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1026 up_read(&cpufreq_rwsem);
1030 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1034 * Restore the saved policy when doing light-weight init and fall back
1035 * to the full init if that fails.
1037 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1040 policy = cpufreq_policy_alloc();
1046 * In the resume path, since we restore a saved policy, the assignment
1047 * to policy->cpu is like an update of the existing policy, rather than
1048 * the creation of a brand new one. So we need to perform this update
1049 * by invoking update_policy_cpu().
1051 if (frozen && cpu != policy->cpu)
1052 update_policy_cpu(policy, cpu);
1056 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1057 cpumask_copy(policy->cpus, cpumask_of(cpu));
1059 init_completion(&policy->kobj_unregister);
1060 INIT_WORK(&policy->update, handle_update);
1062 /* call driver. From then on the cpufreq driver must be able
1063 * to accept all calls to ->verify and ->setpolicy for this CPU
1065 ret = cpufreq_driver->init(policy);
1067 pr_debug("initialization failed\n");
1068 goto err_set_policy_cpu;
1071 if (cpufreq_driver->get) {
1072 policy->cur = cpufreq_driver->get(policy->cpu);
1074 pr_err("%s: ->get() failed\n", __func__);
1080 * Sometimes boot loaders set the CPU frequency to a value outside of
1081 * the frequency table present with the cpufreq core. In such cases the CPU
1082 * might be unstable if it has to run at that frequency for a long
1083 * duration, so it is better to set it to a frequency which is specified
1084 * in the freq table. This also makes cpufreq stats inconsistent, as
1085 * cpufreq-stats would fail to register because the current frequency of the
1086 * CPU isn't found in the freq table.
1088 * Because we don't want this change to affect the boot process badly, we go
1089 * for the next freq which is >= policy->cur ('cur' must be set by now,
1090 * otherwise we will end up setting freq to the lowest entry in the table, as
1091 * 'cur' is initialized to zero).
1093 * We are passing target-freq as "policy->cur - 1" otherwise
1094 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1095 * equal to target-freq.
1097 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1099 /* Are we running at unknown frequency ? */
1100 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1101 if (ret == -EINVAL) {
1102 /* Warn user and fix it */
1103 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1104 __func__, policy->cpu, policy->cur);
1105 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1106 CPUFREQ_RELATION_L);
1109 * Reaching here a few seconds after boot may not
1110 * mean that the system will remain stable at the "unknown"
1111 * frequency for a longer duration. Hence, a BUG_ON().
1114 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1115 __func__, policy->cpu, policy->cur);
1119 /* related_cpus should at least contain policy->cpus */
1120 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1123 * affected cpus must always be the ones which are online. We aren't
1124 * managing offline cpus here.
1126 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1129 policy->user_policy.min = policy->min;
1130 policy->user_policy.max = policy->max;
1133 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1134 CPUFREQ_START, policy);
1136 #ifdef CONFIG_HOTPLUG_CPU
1137 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1139 policy->governor = gov;
1140 pr_debug("Restoring governor %s for cpu %d\n",
1141 policy->governor->name, cpu);
1145 write_lock_irqsave(&cpufreq_driver_lock, flags);
1146 for_each_cpu(j, policy->cpus)
1147 per_cpu(cpufreq_cpu_data, j) = policy;
1148 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1151 ret = cpufreq_add_dev_interface(policy, dev);
1153 goto err_out_unregister;
1154 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1155 CPUFREQ_CREATE_POLICY, policy);
1158 write_lock_irqsave(&cpufreq_driver_lock, flags);
1159 list_add(&policy->policy_list, &cpufreq_policy_list);
1160 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1162 cpufreq_init_policy(policy);
1165 policy->user_policy.policy = policy->policy;
1166 policy->user_policy.governor = policy->governor;
1169 kobject_uevent(&policy->kobj, KOBJ_ADD);
1170 up_read(&cpufreq_rwsem);
1172 pr_debug("initialization complete\n");
1177 write_lock_irqsave(&cpufreq_driver_lock, flags);
1178 for_each_cpu(j, policy->cpus)
1179 per_cpu(cpufreq_cpu_data, j) = NULL;
1180 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1183 if (cpufreq_driver->exit)
1184 cpufreq_driver->exit(policy);
1187 /* Do not leave stale fallback data behind. */
1188 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1189 cpufreq_policy_put_kobj(policy);
1191 cpufreq_policy_free(policy);
1194 up_read(&cpufreq_rwsem);
1200 * cpufreq_add_dev - add a CPU device
1202 * Adds the cpufreq interface for a CPU device.
1204 * The Oracle says: try running cpufreq registration/unregistration concurrently
1205 * with cpu hotplugging and all hell will break loose. Tried to clean this
1206 * mess up, but more thorough testing is needed. - Mathieu
1208 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1210 return __cpufreq_add_dev(dev, sif, false);
1213 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1214 unsigned int old_cpu)
1216 struct device *cpu_dev;
1219 /* first sibling now owns the new sysfs dir */
1220 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1222 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1223 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1225 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1227 down_write(&policy->rwsem);
1228 cpumask_set_cpu(old_cpu, policy->cpus);
1229 up_write(&policy->rwsem);
1231 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1240 static int __cpufreq_remove_dev_prepare(struct device *dev,
1241 struct subsys_interface *sif,
1244 unsigned int cpu = dev->id, cpus;
1246 unsigned long flags;
1247 struct cpufreq_policy *policy;
1249 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1251 write_lock_irqsave(&cpufreq_driver_lock, flags);
1253 policy = per_cpu(cpufreq_cpu_data, cpu);
1255 /* Save the policy somewhere when doing a light-weight tear-down */
1257 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1259 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1262 pr_debug("%s: No cpu_data found\n", __func__);
1267 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1269 pr_err("%s: Failed to stop governor\n", __func__);
1274 #ifdef CONFIG_HOTPLUG_CPU
1275 if (!cpufreq_driver->setpolicy)
1276 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1277 policy->governor->name, CPUFREQ_NAME_LEN);
1280 down_read(&policy->rwsem);
1281 cpus = cpumask_weight(policy->cpus);
1282 up_read(&policy->rwsem);
1284 if (cpu != policy->cpu) {
1286 sysfs_remove_link(&dev->kobj, "cpufreq");
1287 } else if (cpus > 1) {
1288 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1290 update_policy_cpu(policy, new_cpu);
1293 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1294 __func__, new_cpu, cpu);
1302 static int __cpufreq_remove_dev_finish(struct device *dev,
1303 struct subsys_interface *sif,
1306 unsigned int cpu = dev->id, cpus;
1308 unsigned long flags;
1309 struct cpufreq_policy *policy;
1311 read_lock_irqsave(&cpufreq_driver_lock, flags);
1312 policy = per_cpu(cpufreq_cpu_data, cpu);
1313 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1316 pr_debug("%s: No cpu_data found\n", __func__);
1320 down_write(&policy->rwsem);
1321 cpus = cpumask_weight(policy->cpus);
1324 cpumask_clear_cpu(cpu, policy->cpus);
1325 up_write(&policy->rwsem);
1327 /* If cpu is last user of policy, free policy */
1330 ret = __cpufreq_governor(policy,
1331 CPUFREQ_GOV_POLICY_EXIT);
1333 pr_err("%s: Failed to exit governor\n",
1340 cpufreq_policy_put_kobj(policy);
1343 * Perform the ->exit() even during light-weight tear-down,
1344 * since this is a core component, and is essential for the
1345 * subsequent light-weight ->init() to succeed.
1347 if (cpufreq_driver->exit)
1348 cpufreq_driver->exit(policy);
1350 /* Remove policy from list of active policies */
1351 write_lock_irqsave(&cpufreq_driver_lock, flags);
1352 list_del(&policy->policy_list);
1353 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1356 cpufreq_policy_free(policy);
1359 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1360 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1361 pr_err("%s: Failed to start governor\n",
1368 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1373 * cpufreq_remove_dev - remove a CPU device
1375 * Removes the cpufreq interface for a CPU device.
1377 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1379 unsigned int cpu = dev->id;
1382 if (cpu_is_offline(cpu))
1385 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1388 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1393 static void handle_update(struct work_struct *work)
1395 struct cpufreq_policy *policy =
1396 container_of(work, struct cpufreq_policy, update);
1397 unsigned int cpu = policy->cpu;
1398 pr_debug("handle_update for cpu %u called\n", cpu);
1399 cpufreq_update_policy(cpu);
1403 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1406 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1407 * @new_freq: CPU frequency the CPU actually runs at
1409 * We adjust to the current frequency first, and need to clean up later.
1410 * So either call cpufreq_update_policy() or schedule handle_update().
1412 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1413 unsigned int new_freq)
1415 struct cpufreq_policy *policy;
1416 struct cpufreq_freqs freqs;
1417 unsigned long flags;
1419 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1420 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1422 freqs.old = old_freq;
1423 freqs.new = new_freq;
1425 read_lock_irqsave(&cpufreq_driver_lock, flags);
1426 policy = per_cpu(cpufreq_cpu_data, cpu);
1427 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1429 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1430 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1434 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1437 * This is the last known freq, without actually getting it from the driver.
1438 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1440 unsigned int cpufreq_quick_get(unsigned int cpu)
1442 struct cpufreq_policy *policy;
1443 unsigned int ret_freq = 0;
1445 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1446 return cpufreq_driver->get(cpu);
1448 policy = cpufreq_cpu_get(cpu);
1450 ret_freq = policy->cur;
1451 cpufreq_cpu_put(policy);
1456 EXPORT_SYMBOL(cpufreq_quick_get);
1459 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1462 * Just return the max possible frequency for a given CPU.
1464 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1466 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1467 unsigned int ret_freq = 0;
1470 ret_freq = policy->max;
1471 cpufreq_cpu_put(policy);
1476 EXPORT_SYMBOL(cpufreq_quick_get_max);
1478 static unsigned int __cpufreq_get(unsigned int cpu)
1480 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1481 unsigned int ret_freq = 0;
1483 if (!cpufreq_driver->get)
1486 ret_freq = cpufreq_driver->get(cpu);
1488 if (ret_freq && policy->cur &&
1489 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1490 /* verify no discrepancy between actual and
1491 saved value exists */
1492 if (unlikely(ret_freq != policy->cur)) {
1493 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1494 schedule_work(&policy->update);
1502 * cpufreq_get - get the current CPU frequency (in kHz)
1505 * Get the current CPU frequency.
1507 unsigned int cpufreq_get(unsigned int cpu)
1509 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1510 unsigned int ret_freq = 0;
1512 if (cpufreq_disabled() || !cpufreq_driver)
1517 if (!down_read_trylock(&cpufreq_rwsem))
1520 down_read(&policy->rwsem);
1522 ret_freq = __cpufreq_get(cpu);
1524 up_read(&policy->rwsem);
1525 up_read(&cpufreq_rwsem);
1529 EXPORT_SYMBOL(cpufreq_get);
1531 static struct subsys_interface cpufreq_interface = {
1533 .subsys = &cpu_subsys,
1534 .add_dev = cpufreq_add_dev,
1535 .remove_dev = cpufreq_remove_dev,
1539 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1541 * This function is only executed for the boot processor. The other CPUs
1542 * have been put offline by means of CPU hotplug.
1544 static int cpufreq_bp_suspend(void)
1548 int cpu = smp_processor_id();
1549 struct cpufreq_policy *policy;
1551 pr_debug("suspending cpu %u\n", cpu);
1553 /* If there's no policy for the boot CPU, we have nothing to do. */
1554 policy = cpufreq_cpu_get(cpu);
1558 if (cpufreq_driver->suspend) {
1559 ret = cpufreq_driver->suspend(policy);
1561 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1562 "step on CPU %u\n", policy->cpu);
1565 cpufreq_cpu_put(policy);
1570 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1572 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1573 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1574 * restored. It will verify that the current freq is in sync with
1575 * what we believe it to be. This is a bit later than when it
1576 * should be, but nonetheless it's better than calling
1577 * cpufreq_driver->get() here which might re-enable interrupts...
1579 * This function is only executed for the boot CPU. The other CPUs have not
1580 * been turned on yet.
1582 static void cpufreq_bp_resume(void)
1586 int cpu = smp_processor_id();
1587 struct cpufreq_policy *policy;
1589 pr_debug("resuming cpu %u\n", cpu);
1591 /* If there's no policy for the boot CPU, we have nothing to do. */
1592 policy = cpufreq_cpu_get(cpu);
1596 if (cpufreq_driver->resume) {
1597 ret = cpufreq_driver->resume(policy);
1599 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1600 "step on CPU %u\n", policy->cpu);
1605 schedule_work(&policy->update);
1608 cpufreq_cpu_put(policy);
1611 static struct syscore_ops cpufreq_syscore_ops = {
1612 .suspend = cpufreq_bp_suspend,
1613 .resume = cpufreq_bp_resume,
1617 * cpufreq_get_current_driver - return current driver's name
1619 * Return the name string of the currently loaded cpufreq driver
1622 const char *cpufreq_get_current_driver(void)
1625 return cpufreq_driver->name;
1629 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1631 /*********************************************************************
1632 * NOTIFIER LISTS INTERFACE *
1633 *********************************************************************/
1636 * cpufreq_register_notifier - register a driver with cpufreq
1637 * @nb: notifier function to register
1638 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1640 * Add a driver to one of two lists: either a list of drivers that
1641 * are notified about clock rate changes (once before and once after
1642 * the transition), or a list of drivers that are notified about
1643 * changes in cpufreq policy.
1645 * This function may sleep, and has the same return conditions as
1646 * blocking_notifier_chain_register.
1648 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1652 if (cpufreq_disabled())
1655 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1658 case CPUFREQ_TRANSITION_NOTIFIER:
1659 ret = srcu_notifier_chain_register(
1660 &cpufreq_transition_notifier_list, nb);
1662 case CPUFREQ_POLICY_NOTIFIER:
1663 ret = blocking_notifier_chain_register(
1664 &cpufreq_policy_notifier_list, nb);
1672 EXPORT_SYMBOL(cpufreq_register_notifier);
1675 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1676 * @nb: notifier block to be unregistered
1677 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1679 * Remove a driver from the CPU frequency notifier list.
1681 * This function may sleep, and has the same return conditions as
1682 * blocking_notifier_chain_unregister.
1684 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1688 if (cpufreq_disabled())
1692 case CPUFREQ_TRANSITION_NOTIFIER:
1693 ret = srcu_notifier_chain_unregister(
1694 &cpufreq_transition_notifier_list, nb);
1696 case CPUFREQ_POLICY_NOTIFIER:
1697 ret = blocking_notifier_chain_unregister(
1698 &cpufreq_policy_notifier_list, nb);
1706 EXPORT_SYMBOL(cpufreq_unregister_notifier);
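/*
 * Illustrative sketch, not part of this file: a client registering for
 * frequency transition notifications. The foo_* names are hypothetical.
 */
static int foo_cpufreq_notify(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u switched from %u kHz to %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block foo_cpufreq_nb = {
	.notifier_call = foo_cpufreq_notify,
};

static int __init foo_nb_init(void)
{
	return cpufreq_register_notifier(&foo_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}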
1709 /*********************************************************************
1711 *********************************************************************/
1713 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1714 unsigned int target_freq,
1715 unsigned int relation)
1717 int retval = -EINVAL;
1718 unsigned int old_target_freq = target_freq;
1720 if (cpufreq_disabled())
1723 /* Make sure that target_freq is within supported range */
1724 if (target_freq > policy->max)
1725 target_freq = policy->max;
1726 if (target_freq < policy->min)
1727 target_freq = policy->min;
1729 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1730 policy->cpu, target_freq, relation, old_target_freq);
1733 * This might look like a redundant call as we are checking it again
1734 * after finding the index. But it is left intentionally for cases where
1735 * exactly the same freq is requested again, so we can save a few function calls.
1738 if (target_freq == policy->cur)
1741 if (cpufreq_driver->target)
1742 retval = cpufreq_driver->target(policy, target_freq, relation);
1743 else if (cpufreq_driver->target_index) {
1744 struct cpufreq_frequency_table *freq_table;
1745 struct cpufreq_freqs freqs;
1749 freq_table = cpufreq_frequency_get_table(policy->cpu);
1750 if (unlikely(!freq_table)) {
1751 pr_err("%s: Unable to find freq_table\n", __func__);
1755 retval = cpufreq_frequency_table_target(policy, freq_table,
1756 target_freq, relation, &index);
1757 if (unlikely(retval)) {
1758 pr_err("%s: Unable to find matching freq\n", __func__);
1762 if (freq_table[index].frequency == policy->cur) {
1767 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1770 freqs.old = policy->cur;
1771 freqs.new = freq_table[index].frequency;
1774 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1775 __func__, policy->cpu, freqs.old,
1778 cpufreq_notify_transition(policy, &freqs,
1782 retval = cpufreq_driver->target_index(policy, index);
1784 pr_err("%s: Failed to change cpu frequency: %d\n",
1788 cpufreq_notify_post_transition(policy, &freqs, retval);
1794 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1796 int cpufreq_driver_target(struct cpufreq_policy *policy,
1797 unsigned int target_freq,
1798 unsigned int relation)
1802 down_write(&policy->rwsem);
1804 ret = __cpufreq_driver_target(policy, target_freq, relation);
1806 up_write(&policy->rwsem);
1810 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
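/*
 * Illustrative sketch, not part of this file: picking and requesting a new
 * frequency from a context that does not hold policy->rwsem. foo_pick_freq()
 * and foo_adjust() are hypothetical.
 */
static unsigned int foo_pick_freq(struct cpufreq_policy *policy,
				  unsigned int load)
{
	/* arbitrary threshold, purely for illustration */
	return load > 80 ? policy->max : policy->min;
}

static void foo_adjust(struct cpufreq_policy *policy, unsigned int load)
{
	unsigned int next = foo_pick_freq(policy, load);

	/*
	 * cpufreq_driver_target() takes policy->rwsem itself; code already
	 * holding the lock must call __cpufreq_driver_target() instead.
	 */
	cpufreq_driver_target(policy, next, CPUFREQ_RELATION_L);
}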
1813 * when "event" is CPUFREQ_GOV_LIMITS
1816 static int __cpufreq_governor(struct cpufreq_policy *policy,
1821 /* Must only be defined when the default governor is known to have latency
1822 restrictions, like e.g. conservative or ondemand.
1823 That this is the case is already ensured in Kconfig
1825 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1826 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1828 struct cpufreq_governor *gov = NULL;
1831 if (policy->governor->max_transition_latency &&
1832 policy->cpuinfo.transition_latency >
1833 policy->governor->max_transition_latency) {
1837 printk(KERN_WARNING "%s governor failed, too long"
1838 " transition latency of HW, fallback"
1839 " to %s governor\n",
1840 policy->governor->name,
1842 policy->governor = gov;
1846 if (event == CPUFREQ_GOV_POLICY_INIT)
1847 if (!try_module_get(policy->governor->owner))
1850 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1851 policy->cpu, event);
1853 mutex_lock(&cpufreq_governor_lock);
1854 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1855 || (!policy->governor_enabled
1856 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1857 mutex_unlock(&cpufreq_governor_lock);
1861 if (event == CPUFREQ_GOV_STOP)
1862 policy->governor_enabled = false;
1863 else if (event == CPUFREQ_GOV_START)
1864 policy->governor_enabled = true;
1866 mutex_unlock(&cpufreq_governor_lock);
1868 ret = policy->governor->governor(policy, event);
1871 if (event == CPUFREQ_GOV_POLICY_INIT)
1872 policy->governor->initialized++;
1873 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1874 policy->governor->initialized--;
1876 /* Restore original values */
1877 mutex_lock(&cpufreq_governor_lock);
1878 if (event == CPUFREQ_GOV_STOP)
1879 policy->governor_enabled = true;
1880 else if (event == CPUFREQ_GOV_START)
1881 policy->governor_enabled = false;
1882 mutex_unlock(&cpufreq_governor_lock);
1885 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1886 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1887 module_put(policy->governor->owner);
1892 int cpufreq_register_governor(struct cpufreq_governor *governor)
1899 if (cpufreq_disabled())
1902 mutex_lock(&cpufreq_governor_mutex);
1904 governor->initialized = 0;
1906 if (__find_governor(governor->name) == NULL) {
1908 list_add(&governor->governor_list, &cpufreq_governor_list);
1911 mutex_unlock(&cpufreq_governor_mutex);
1914 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1916 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1918 #ifdef CONFIG_HOTPLUG_CPU
1925 if (cpufreq_disabled())
1928 #ifdef CONFIG_HOTPLUG_CPU
1929 for_each_present_cpu(cpu) {
1930 if (cpu_online(cpu))
1932 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1933 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1937 mutex_lock(&cpufreq_governor_mutex);
1938 list_del(&governor->governor_list);
1939 mutex_unlock(&cpufreq_governor_mutex);
1942 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
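/*
 * Illustrative sketch, not part of this file: the skeleton of a governor
 * whose ->governor() callback handles the events dispatched by
 * __cpufreq_governor() above. The foo_* names are hypothetical.
 */
static int foo_governor_event(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:	/* allocate per-policy state */
	case CPUFREQ_GOV_START:		/* start sampling */
	case CPUFREQ_GOV_LIMITS:	/* policy->min/max changed */
	case CPUFREQ_GOV_STOP:		/* stop sampling */
	case CPUFREQ_GOV_POLICY_EXIT:	/* free per-policy state */
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_foo = {
	.name		= "foo",
	.governor	= foo_governor_event,
	.owner		= THIS_MODULE,
};

static int __init foo_gov_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_foo);
}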
1945 /*********************************************************************
1946 * POLICY INTERFACE *
1947 *********************************************************************/
1950 * cpufreq_get_policy - get the current cpufreq_policy
1951 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1954 * Reads the current cpufreq policy.
1956 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1958 struct cpufreq_policy *cpu_policy;
1962 cpu_policy = cpufreq_cpu_get(cpu);
1966 memcpy(policy, cpu_policy, sizeof(*policy));
1968 cpufreq_cpu_put(cpu_policy);
1971 EXPORT_SYMBOL(cpufreq_get_policy);
1974 * policy : current policy.
1975 * new_policy: policy to be set.
1977 static int cpufreq_set_policy(struct cpufreq_policy *policy,
1978 struct cpufreq_policy *new_policy)
1980 int ret = 0, failed = 1;
1982 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1983 new_policy->min, new_policy->max);
1985 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1987 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1992 /* verify the cpu speed can be set within this limit */
1993 ret = cpufreq_driver->verify(new_policy);
1997 /* adjust if necessary - all reasons */
1998 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1999 CPUFREQ_ADJUST, new_policy);
2001 /* adjust if necessary - hardware incompatibility*/
2002 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2003 CPUFREQ_INCOMPATIBLE, new_policy);
2006 * verify the cpu speed can be set within this limit, which might be
2007 * different to the first one
2009 ret = cpufreq_driver->verify(new_policy);
2013 /* notification of the new policy */
2014 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2015 CPUFREQ_NOTIFY, new_policy);
2017 policy->min = new_policy->min;
2018 policy->max = new_policy->max;
2020 pr_debug("new min and max freqs are %u - %u kHz\n",
2021 policy->min, policy->max);
2023 if (cpufreq_driver->setpolicy) {
2024 policy->policy = new_policy->policy;
2025 pr_debug("setting range\n");
2026 ret = cpufreq_driver->setpolicy(new_policy);
2028 if (new_policy->governor != policy->governor) {
2029 /* save old, working values */
2030 struct cpufreq_governor *old_gov = policy->governor;
2032 pr_debug("governor switch\n");
2034 /* end old governor */
2035 if (policy->governor) {
2036 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2037 up_write(&policy->rwsem);
2038 __cpufreq_governor(policy,
2039 CPUFREQ_GOV_POLICY_EXIT);
2040 down_write(&policy->rwsem);
2043 /* start new governor */
2044 policy->governor = new_policy->governor;
2045 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2046 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
2049 up_write(&policy->rwsem);
2050 __cpufreq_governor(policy,
2051 CPUFREQ_GOV_POLICY_EXIT);
2052 down_write(&policy->rwsem);
2057 /* new governor failed, so re-start old one */
2058 pr_debug("starting governor %s failed\n",
2059 policy->governor->name);
2061 policy->governor = old_gov;
2062 __cpufreq_governor(policy,
2063 CPUFREQ_GOV_POLICY_INIT);
2064 __cpufreq_governor(policy,
2070 /* might be a policy change, too, so fall through */
2072 pr_debug("governor: change or update limits\n");
2073 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2081 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2082 * @cpu: CPU which shall be re-evaluated
2084 * Useful for policy notifiers which have different requirements
2085 * at different times.
2087 int cpufreq_update_policy(unsigned int cpu)
2089 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2090 struct cpufreq_policy new_policy;
2098 down_write(&policy->rwsem);
2100 pr_debug("updating policy for CPU %u\n", cpu);
2101 memcpy(&new_policy, policy, sizeof(*policy));
2102 new_policy.min = policy->user_policy.min;
2103 new_policy.max = policy->user_policy.max;
2104 new_policy.policy = policy->user_policy.policy;
2105 new_policy.governor = policy->user_policy.governor;
2108 * BIOS might change freq behind our back
2109 * -> ask driver for current freq and notify governors about a change
2111 if (cpufreq_driver->get) {
2112 new_policy.cur = cpufreq_driver->get(cpu);
2114 pr_debug("Driver did not initialize current freq");
2115 policy->cur = new_policy.cur;
2117 if (policy->cur != new_policy.cur && has_target())
2118 cpufreq_out_of_sync(cpu, policy->cur,
2123 ret = cpufreq_set_policy(policy, &new_policy);
2125 up_write(&policy->rwsem);
2127 cpufreq_cpu_put(policy);
2131 EXPORT_SYMBOL(cpufreq_update_policy);
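/*
 * Illustrative sketch, not part of this file: a thermal-style client that
 * clamps policy->max from a CPUFREQ_ADJUST policy notifier and then calls
 * cpufreq_update_policy() so the clamp is re-evaluated. The foo_* names are
 * hypothetical.
 */
static unsigned int foo_max_khz = UINT_MAX;

static int foo_policy_notify(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, foo_max_khz);

	return NOTIFY_OK;
}

static struct notifier_block foo_policy_nb = {
	.notifier_call = foo_policy_notify,
};

static int __init foo_cap_init(void)
{
	return cpufreq_register_notifier(&foo_policy_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}

static void foo_set_cap(unsigned int cpu, unsigned int max_khz)
{
	foo_max_khz = max_khz;
	cpufreq_update_policy(cpu);	/* re-runs cpufreq_set_policy() for @cpu */
}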
2133 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2134 unsigned long action, void *hcpu)
2136 unsigned int cpu = (unsigned long)hcpu;
2138 bool frozen = false;
2140 dev = get_cpu_device(cpu);
2143 if (action & CPU_TASKS_FROZEN)
2146 switch (action & ~CPU_TASKS_FROZEN) {
2148 __cpufreq_add_dev(dev, NULL, frozen);
2149 cpufreq_update_policy(cpu);
2152 case CPU_DOWN_PREPARE:
2153 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2157 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2160 case CPU_DOWN_FAILED:
2161 __cpufreq_add_dev(dev, NULL, frozen);
2168 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2169 .notifier_call = cpufreq_cpu_callback,
2172 /*********************************************************************
2173 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2174 *********************************************************************/
2177 * cpufreq_register_driver - register a CPU Frequency driver
2178 * @driver_data: A struct cpufreq_driver containing the values
2179 * submitted by the CPU Frequency driver.
2181 * Registers a CPU Frequency driver to this core code. This code
2182 * returns zero on success, -EBUSY when another driver got here first
2183 * (and isn't unregistered in the meantime).
2186 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2188 unsigned long flags;
2191 if (cpufreq_disabled())
2194 if (!driver_data || !driver_data->verify || !driver_data->init ||
2195 !(driver_data->setpolicy || driver_data->target_index ||
2196 driver_data->target))
2199 pr_debug("trying to register driver %s\n", driver_data->name);
2201 if (driver_data->setpolicy)
2202 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2204 write_lock_irqsave(&cpufreq_driver_lock, flags);
2205 if (cpufreq_driver) {
2206 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2209 cpufreq_driver = driver_data;
2210 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2212 ret = subsys_interface_register(&cpufreq_interface);
2214 goto err_null_driver;
2216 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2220 /* check for at least one working CPU */
2221 for (i = 0; i < nr_cpu_ids; i++)
2222 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2227 /* if all ->init() calls failed, unregister */
2229 pr_debug("no CPU initialized for driver %s\n",
2235 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2236 pr_debug("driver %s up and running\n", driver_data->name);
2240 subsys_interface_unregister(&cpufreq_interface);
2242 write_lock_irqsave(&cpufreq_driver_lock, flags);
2243 cpufreq_driver = NULL;
2244 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2247 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
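/*
 * Illustrative sketch, not part of this file: the minimum a ->target_index
 * style driver provides to pass the checks above (->init, ->verify and one
 * of ->setpolicy/->target/->target_index). The foo_* callbacks are
 * hypothetical; foo_cpufreq_init() and foo_freq_table are assumed to be the
 * ones sketched near cpufreq_generic_init().
 */
static int foo_cpufreq_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	/* program the clock for foo_freq_table[index].frequency here */
	return 0;
}

static unsigned int foo_cpufreq_get(unsigned int cpu)
{
	/* read the current rate from hardware; placeholder value */
	return foo_freq_table[0].frequency;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= foo_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_cpufreq_target_index,
	.get		= foo_cpufreq_get,
	.attr		= cpufreq_generic_attr,
};

static int __init foo_cpufreq_probe(void)
{
	return cpufreq_register_driver(&foo_cpufreq_driver);
}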
2250 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2252 * Unregister the current CPUFreq driver. Only call this if you have
2253 * the right to do so, i.e. if you have succeeded in initialising before!
2254 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2255 * currently not initialised.
2257 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2259 unsigned long flags;
2261 if (!cpufreq_driver || (driver != cpufreq_driver))
2264 pr_debug("unregistering driver %s\n", driver->name);
2266 subsys_interface_unregister(&cpufreq_interface);
2267 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2269 down_write(&cpufreq_rwsem);
2270 write_lock_irqsave(&cpufreq_driver_lock, flags);
2272 cpufreq_driver = NULL;
2274 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2275 up_write(&cpufreq_rwsem);
2279 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2281 static int __init cpufreq_core_init(void)
2283 if (cpufreq_disabled())
2286 cpufreq_global_kobject = kobject_create();
2287 BUG_ON(!cpufreq_global_kobject);
2288 register_syscore_ops(&cpufreq_syscore_ops);
2292 core_initcall(cpufreq_core_init);