drivers/sh/clk/core.c
/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

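/**
 * clk_rate_table_build - build a cpufreq frequency table for a clock
 * @clk: the clock the table describes
 * @freq_table: table to fill; must hold @nr_freqs entries plus a terminator
 * @nr_freqs: number of frequencies to generate
 * @src_table: divisor/multiplier pairs applied to the parent rate
 * @bitmap: optional bitmap of usable entries; cleared bits are invalidated
 *
 * Each entry is parent->rate * mult / div.  Entries with a zero divisor
 * or multiplier, or with a cleared @bitmap bit, are marked
 * CPUFREQ_ENTRY_INVALID.  The table is terminated with CPUFREQ_TABLE_END.
 */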
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

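/*
 * Iterate over every candidate position in [r->min, r->max], fetching
 * the frequency via r->func() and skipping positions for which the
 * callback reports 0 (invalid).
 */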
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

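/*
 * Find the candidate frequency closest to rounder->rate.  Requests
 * outside the supported range are clamped to the highest or lowest
 * available frequency.
 */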
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

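/*
 * Round @rate against the frequency table previously built with
 * clk_rate_table_build().
 */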
long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

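/*
 * Round @rate against the set of rates reachable by dividing the
 * parent clock by an integer divisor in [div_min, div_max].
 */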
long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}

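/*
 * Return the index of the table entry that matches @rate exactly,
 * or -ENOENT if there is none.
 */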
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

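/*
 * Detach @child from its current parent's child list and attach it
 * to @parent's, updating the parent pointer.
 */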
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

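/*
 * Clocks are enabled and disabled by reference count: hardware is only
 * touched when the usecount transitions between 0 and 1, and enabling
 * a clock first enables its parent (disabling releases it again).
 */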
static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

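/*
 * Resolve the MMIO mapping for a clock: root clocks without an
 * explicit range get a dummy mapping, children inherit from their
 * root clock, and physical ranges are ioremapped once and refcounted
 * thereafter.
 */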
static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

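/*
 * Register a clock with the framework: establish its mapping, hook it
 * into its parent's child list (or the root list), add it to the
 * global clock list and run the optional ops->init() hook.
 */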
int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

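/*
 * Set a clock's rate with an implementation-specific algorithm id.
 * Clocks without a .set_rate op simply cache the requested rate;
 * afterwards the rate is recalculated and propagated to children.
 */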
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

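/*
 * Reparenting is only permitted while the clock is unused
 * (usecount == 0); a busy clock returns -EBUSY.
 */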
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

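/*
 * Find the parent frequency and divisor that best approximate @target,
 * scanning the parent's frequency table with divisors clamped to
 * [div_min, div_max].  Returns the remaining rate error.
 */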
long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		/* @best_freq may be NULL, don't dereference it unchecked */
		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 best_freq ? *best_freq : 0, best->frequency);

		if (!error)
			break;
	}

	/* guard against a table where no valid entry was ever selected */
	if (parent_freq && best)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);

#ifdef CONFIG_PM
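/*
 * Clock hardware state is lost across hibernation, so on the resume
 * transition (PM_EVENT_ON following PM_EVENT_FREEZE) replay each
 * registered clock's parent and rate settings; other PM events are
 * only recorded.
 */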
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

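/*
 * Create a per-clock debugfs directory, named after the clk pointer,
 * exposing the usecount, rate and flags attributes.
 */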
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);