Merge branch 'parisc-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller...
[cascardo/linux.git] / drivers / clk / clk-xgene.c
1 /*
2  * clk-xgene.c - AppliedMicro X-Gene Clock Interface
3  *
4  * Copyright (c) 2013, Applied Micro Circuits Corporation
5  * Author: Loc Ho <lho@apm.com>
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
20  * MA 02111-1307 USA
21  *
22  */
23 #include <linux/module.h>
24 #include <linux/spinlock.h>
25 #include <linux/io.h>
26 #include <linux/of.h>
27 #include <linux/clkdev.h>
28 #include <linux/clk-provider.h>
29 #include <linux/of_address.h>
30
/* Register SCU_PCPPLL bit fields */
/* N_DIV_RD: v1 PCP PLL feedback divider (used as NF in recalc_rate) */
#define N_DIV_RD(src)                   ((src) & 0x000001ff)
/* SC_N_DIV_RD: v2 PLL feedback divider; SC_OUTDIV2: output-divider select */
#define SC_N_DIV_RD(src)                ((src) & 0x0000007f)
#define SC_OUTDIV2(src)                 (((src) & 0x00000100) >> 8)

/* Register SCU_SOCPLL bit fields */
/* CLKR: reference divider (NREF - 1); CLKOD: output divider (NOUT - 1) */
#define CLKR_RD(src)                    (((src) & 0x07000000)>>24)
#define CLKOD_RD(src)                   (((src) & 0x00300000)>>20)
/* PLL is held in reset while this bit is set (see xgene_clk_pll_is_enabled) */
#define REGSPEC_RESET_F1_MASK           0x00010000
/* CLKF: feedback divider (NFB) */
#define CLKF_RD(src)                    (((src) & 0x000001ff))

#define XGENE_CLK_DRIVER_VER            "0.1"

/* Single lock shared by all clocks to serialize CSR read-modify-write */
static DEFINE_SPINLOCK(clk_lock);
45
46 static inline u32 xgene_clk_read(void __iomem *csr)
47 {
48         return readl_relaxed(csr);
49 }
50
51 static inline void xgene_clk_write(u32 data, void __iomem *csr)
52 {
53         writel_relaxed(data, csr);
54 }
55
/* PLL Clock */
enum xgene_pll_type {
        PLL_TYPE_PCP = 0,       /* processor-complex (PCP) PLL register layout */
        PLL_TYPE_SOC = 1,       /* SoC PLL register layout */
};

struct xgene_clk_pll {
        struct clk_hw   hw;             /* common clk framework handle */
        void __iomem    *reg;           /* base of the mapped PLL CSR region */
        spinlock_t      *lock;          /* register lock; not taken by the
                                         * read-only PLL ops in this file */
        u32             pll_offset;     /* offset of the PLL register from @reg */
        enum xgene_pll_type     type;   /* selects the v1 field decoding */
        int             version;        /* 1 = legacy layout, 2 = v2 layout */
};

#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
72
73 static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
74 {
75         struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
76         u32 data;
77
78         data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
79         pr_debug("%s pll %s\n", clk_hw_get_name(hw),
80                 data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
81
82         return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
83 }
84
85 static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
86                                 unsigned long parent_rate)
87 {
88         struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
89         unsigned long fref;
90         unsigned long fvco;
91         u32 pll;
92         u32 nref;
93         u32 nout;
94         u32 nfb;
95
96         pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
97
98         if (pllclk->version <= 1) {
99                 if (pllclk->type == PLL_TYPE_PCP) {
100                         /*
101                         * PLL VCO = Reference clock * NF
102                         * PCP PLL = PLL_VCO / 2
103                         */
104                         nout = 2;
105                         fvco = parent_rate * (N_DIV_RD(pll) + 4);
106                 } else {
107                         /*
108                         * Fref = Reference Clock / NREF;
109                         * Fvco = Fref * NFB;
110                         * Fout = Fvco / NOUT;
111                         */
112                         nref = CLKR_RD(pll) + 1;
113                         nout = CLKOD_RD(pll) + 1;
114                         nfb = CLKF_RD(pll);
115                         fref = parent_rate / nref;
116                         fvco = fref * nfb;
117                 }
118         } else {
119                 /*
120                  * fvco = Reference clock * FBDIVC
121                  * PLL freq = fvco / NOUT
122                  */
123                 nout = SC_OUTDIV2(pll) ? 2 : 3;
124                 fvco = parent_rate * SC_N_DIV_RD(pll);
125         }
126         pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
127                  clk_hw_get_name(hw), fvco / nout, parent_rate,
128                  pllclk->version);
129
130         return fvco / nout;
131 }
132
/*
 * PLL clocks are configured before the kernel runs; only status query and
 * rate readback are exposed (no enable/set_rate).
 */
static const struct clk_ops xgene_clk_pll_ops = {
        .is_enabled = xgene_clk_pll_is_enabled,
        .recalc_rate = xgene_clk_pll_recalc_rate,
};
137
138 static struct clk *xgene_register_clk_pll(struct device *dev,
139         const char *name, const char *parent_name,
140         unsigned long flags, void __iomem *reg, u32 pll_offset,
141         u32 type, spinlock_t *lock, int version)
142 {
143         struct xgene_clk_pll *apmclk;
144         struct clk *clk;
145         struct clk_init_data init;
146
147         /* allocate the APM clock structure */
148         apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
149         if (!apmclk) {
150                 pr_err("%s: could not allocate APM clk\n", __func__);
151                 return ERR_PTR(-ENOMEM);
152         }
153
154         init.name = name;
155         init.ops = &xgene_clk_pll_ops;
156         init.flags = flags;
157         init.parent_names = parent_name ? &parent_name : NULL;
158         init.num_parents = parent_name ? 1 : 0;
159
160         apmclk->version = version;
161         apmclk->reg = reg;
162         apmclk->lock = lock;
163         apmclk->pll_offset = pll_offset;
164         apmclk->type = type;
165         apmclk->hw.init = &init;
166
167         /* Register the clock */
168         clk = clk_register(dev, &apmclk->hw);
169         if (IS_ERR(clk)) {
170                 pr_err("%s: could not register clk %s\n", __func__, name);
171                 kfree(apmclk);
172                 return NULL;
173         }
174         return clk;
175 }
176
/*
 * Map a device-tree node to its PLL register-layout version:
 * the original (v1) compatibles return 1, everything else is v2.
 */
static int xgene_pllclk_version(struct device_node *np)
{
        if (of_device_is_compatible(np, "apm,xgene-socpll-clock") ||
            of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
                return 1;

        return 2;
}
185
186 static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
187 {
188         const char *clk_name = np->full_name;
189         struct clk *clk;
190         void __iomem *reg;
191         int version = xgene_pllclk_version(np);
192
193         reg = of_iomap(np, 0);
194         if (reg == NULL) {
195                 pr_err("Unable to map CSR register for %s\n", np->full_name);
196                 return;
197         }
198         of_property_read_string(np, "clock-output-names", &clk_name);
199         clk = xgene_register_clk_pll(NULL,
200                         clk_name, of_clk_get_parent_name(np, 0),
201                         0, reg, 0, pll_type, &clk_lock,
202                         version);
203         if (!IS_ERR(clk)) {
204                 of_clk_add_provider(np, of_clk_src_simple_get, clk);
205                 clk_register_clkdev(clk, clk_name, NULL);
206                 pr_debug("Add %s clock PLL\n", clk_name);
207         }
208 }
209
/* OF init hook for "apm,xgene-socpll[-v2]-clock" nodes. */
static void xgene_socpllclk_init(struct device_node *np)
{
        xgene_pllclk_init(np, PLL_TYPE_SOC);
}
214
/* OF init hook for "apm,xgene-pcppll[-v2]-clock" nodes. */
static void xgene_pcppllclk_init(struct device_node *np)
{
        xgene_pllclk_init(np, PLL_TYPE_PCP);
}
219
/**
 * struct xgene_clk_pmd - PMD clock
 *
 * @hw:         handle between common and hardware-specific interfaces
 * @reg:        register containing the fractional scale multiplier (scaler)
 * @shift:      shift to the unit bit field
 * @mask:       mask of the scaler bit field, ((2^width) - 1) << @shift
 * @denom:      1/denominator unit
 * @flags:      XGENE_CLK_PMD_* behavior flags (see below)
 * @lock:       register lock
 * Flags:
 * XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
 *      from the register plus one. For example,
 *              0 for (0 + 1) / denom,
 *              1 for (1 + 1) / denom and etc.
 *      If this flag is set, it is
 *              0 for (denom - 0) / denom,
 *              1 for (denom - 1) / denom and etc.
 *
 */
struct xgene_clk_pmd {
        struct clk_hw   hw;
        void __iomem    *reg;
        u8              shift;
        u32             mask;
        u64             denom;
        u32             flags;
        spinlock_t      *lock;
};

#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)

#define XGENE_CLK_PMD_SCALE_INVERTED    BIT(0)
/* Position and width of the scaler field as wired on X-Gene PMD clocks */
#define XGENE_CLK_PMD_SHIFT             8
#define XGENE_CLK_PMD_WIDTH             3
253
/*
 * Compute the PMD output rate: parent_rate * scaler / denom, where the
 * scaler is read from the hardware register field.
 */
static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
                                               unsigned long parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        unsigned long flags = 0;
        u64 ret, scale;
        u32 val;

        /*
         * The lock (when provided) only guards the register read; the
         * __acquire/__release keep sparse's lock-context tracking balanced
         * on the lockless path.
         */
        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        ret = (u64)parent_rate;

        /*
         * Decode the scaler field: hardware encodes either (scale + 1) or
         * (denom - scale) depending on XGENE_CLK_PMD_SCALE_INVERTED.
         */
        scale = (val & fd->mask) >> fd->shift;
        if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
                scale = fd->denom - scale;
        else
                scale++;

        /* freq = parent_rate * scaler / denom */
        /*
         * NOTE(review): dividing before multiplying truncates; presumably
         * acceptable because parent_rate is far larger than denom (2^3
         * for PMD clocks) -- TODO confirm.
         */
        do_div(ret, fd->denom);
        ret *= scale;
        /* a degenerate zero result falls back to the parent rate */
        if (ret == 0)
                ret = (u64)parent_rate;

        return ret;
}
290
291 static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
292                                      unsigned long *parent_rate)
293 {
294         struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
295         u64 ret, scale;
296
297         if (!rate || rate >= *parent_rate)
298                 return *parent_rate;
299
300         /* freq = parent_rate * scaler / denom */
301         ret = rate * fd->denom;
302         scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
303
304         ret = (u64)*parent_rate * scale;
305         do_div(ret, fd->denom);
306
307         return ret;
308 }
309
310 static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
311                                   unsigned long parent_rate)
312 {
313         struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
314         unsigned long flags = 0;
315         u64 scale, ret;
316         u32 val;
317
318         /*
319          * Compute the scaler:
320          *
321          * freq = parent_rate * scaler / denom, or
322          * scaler = freq * denom / parent_rate
323          */
324         ret = rate * fd->denom;
325         scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);
326
327         /* Check if inverted */
328         if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
329                 scale = fd->denom - scale;
330         else
331                 scale--;
332
333         if (fd->lock)
334                 spin_lock_irqsave(fd->lock, flags);
335         else
336                 __acquire(fd->lock);
337
338         val = clk_readl(fd->reg);
339         val &= ~fd->mask;
340         val |= (scale << fd->shift);
341         clk_writel(val, fd->reg);
342
343         if (fd->lock)
344                 spin_unlock_irqrestore(fd->lock, flags);
345         else
346                 __release(fd->lock);
347
348         return 0;
349 }
350
/* PMD clocks support rate readback, rounding and programming; no gating. */
static const struct clk_ops xgene_clk_pmd_ops = {
        .recalc_rate = xgene_clk_pmd_recalc_rate,
        .round_rate = xgene_clk_pmd_round_rate,
        .set_rate = xgene_clk_pmd_set_rate,
};
356
357 static struct clk *
358 xgene_register_clk_pmd(struct device *dev,
359                        const char *name, const char *parent_name,
360                        unsigned long flags, void __iomem *reg, u8 shift,
361                        u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
362 {
363         struct xgene_clk_pmd *fd;
364         struct clk_init_data init;
365         struct clk *clk;
366
367         fd = kzalloc(sizeof(*fd), GFP_KERNEL);
368         if (!fd)
369                 return ERR_PTR(-ENOMEM);
370
371         init.name = name;
372         init.ops = &xgene_clk_pmd_ops;
373         init.flags = flags;
374         init.parent_names = parent_name ? &parent_name : NULL;
375         init.num_parents = parent_name ? 1 : 0;
376
377         fd->reg = reg;
378         fd->shift = shift;
379         fd->mask = (BIT(width) - 1) << shift;
380         fd->denom = denom;
381         fd->flags = clk_flags;
382         fd->lock = lock;
383         fd->hw.init = &init;
384
385         clk = clk_register(dev, &fd->hw);
386         if (IS_ERR(clk)) {
387                 pr_err("%s: could not register clk %s\n", __func__, name);
388                 kfree(fd);
389                 return NULL;
390         }
391
392         return clk;
393 }
394
395 static void xgene_pmdclk_init(struct device_node *np)
396 {
397         const char *clk_name = np->full_name;
398         void __iomem *csr_reg;
399         struct resource res;
400         struct clk *clk;
401         u64 denom;
402         u32 flags = 0;
403         int rc;
404
405         /* Check if the entry is disabled */
406         if (!of_device_is_available(np))
407                 return;
408
409         /* Parse the DTS register for resource */
410         rc = of_address_to_resource(np, 0, &res);
411         if (rc != 0) {
412                 pr_err("no DTS register for %s\n", np->full_name);
413                 return;
414         }
415         csr_reg = of_iomap(np, 0);
416         if (!csr_reg) {
417                 pr_err("Unable to map resource for %s\n", np->full_name);
418                 return;
419         }
420         of_property_read_string(np, "clock-output-names", &clk_name);
421
422         denom = BIT(XGENE_CLK_PMD_WIDTH);
423         flags |= XGENE_CLK_PMD_SCALE_INVERTED;
424
425         clk = xgene_register_clk_pmd(NULL, clk_name,
426                                      of_clk_get_parent_name(np, 0), 0,
427                                      csr_reg, XGENE_CLK_PMD_SHIFT,
428                                      XGENE_CLK_PMD_WIDTH, denom,
429                                      flags, &clk_lock);
430         if (!IS_ERR(clk)) {
431                 of_clk_add_provider(np, of_clk_src_simple_get, clk);
432                 clk_register_clkdev(clk, clk_name, NULL);
433                 pr_debug("Add %s clock\n", clk_name);
434         } else {
435                 if (csr_reg)
436                         iounmap(csr_reg);
437         }
438 }
439
/* IP Clock */
struct xgene_dev_parameters {
        void __iomem *csr_reg;          /* CSR for IP clock */
        u32 reg_clk_offset;             /* Offset to clock enable CSR */
        u32 reg_clk_mask;               /* Mask bit for clock enable */
        u32 reg_csr_offset;             /* Offset to CSR reset */
        u32 reg_csr_mask;               /* Mask bit for disable CSR reset */
        void __iomem *divider_reg;      /* CSR for divider */
        u32 reg_divider_offset;         /* Offset to divider register */
        u32 reg_divider_shift;          /* Bit shift to divider field */
        u32 reg_divider_width;          /* Width of the bit to divider field */
};

/* One gatable/dividable device (IP) clock instance */
struct xgene_clk {
        struct clk_hw   hw;             /* common clk framework handle */
        spinlock_t      *lock;          /* serializes CSR read-modify-write */
        struct xgene_dev_parameters     param;  /* register layout for this clock */
};

#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)
460
461 static int xgene_clk_enable(struct clk_hw *hw)
462 {
463         struct xgene_clk *pclk = to_xgene_clk(hw);
464         unsigned long flags = 0;
465         u32 data;
466         phys_addr_t reg;
467
468         if (pclk->lock)
469                 spin_lock_irqsave(pclk->lock, flags);
470
471         if (pclk->param.csr_reg != NULL) {
472                 pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
473                 reg = __pa(pclk->param.csr_reg);
474                 /* First enable the clock */
475                 data = xgene_clk_read(pclk->param.csr_reg +
476                                         pclk->param.reg_clk_offset);
477                 data |= pclk->param.reg_clk_mask;
478                 xgene_clk_write(data, pclk->param.csr_reg +
479                                         pclk->param.reg_clk_offset);
480                 pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
481                         clk_hw_get_name(hw), &reg,
482                         pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
483                         data);
484
485                 /* Second enable the CSR */
486                 data = xgene_clk_read(pclk->param.csr_reg +
487                                         pclk->param.reg_csr_offset);
488                 data &= ~pclk->param.reg_csr_mask;
489                 xgene_clk_write(data, pclk->param.csr_reg +
490                                         pclk->param.reg_csr_offset);
491                 pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
492                         clk_hw_get_name(hw), &reg,
493                         pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
494                         data);
495         }
496
497         if (pclk->lock)
498                 spin_unlock_irqrestore(pclk->lock, flags);
499
500         return 0;
501 }
502
503 static void xgene_clk_disable(struct clk_hw *hw)
504 {
505         struct xgene_clk *pclk = to_xgene_clk(hw);
506         unsigned long flags = 0;
507         u32 data;
508
509         if (pclk->lock)
510                 spin_lock_irqsave(pclk->lock, flags);
511
512         if (pclk->param.csr_reg != NULL) {
513                 pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
514                 /* First put the CSR in reset */
515                 data = xgene_clk_read(pclk->param.csr_reg +
516                                         pclk->param.reg_csr_offset);
517                 data |= pclk->param.reg_csr_mask;
518                 xgene_clk_write(data, pclk->param.csr_reg +
519                                         pclk->param.reg_csr_offset);
520
521                 /* Second disable the clock */
522                 data = xgene_clk_read(pclk->param.csr_reg +
523                                         pclk->param.reg_clk_offset);
524                 data &= ~pclk->param.reg_clk_mask;
525                 xgene_clk_write(data, pclk->param.csr_reg +
526                                         pclk->param.reg_clk_offset);
527         }
528
529         if (pclk->lock)
530                 spin_unlock_irqrestore(pclk->lock, flags);
531 }
532
533 static int xgene_clk_is_enabled(struct clk_hw *hw)
534 {
535         struct xgene_clk *pclk = to_xgene_clk(hw);
536         u32 data = 0;
537
538         if (pclk->param.csr_reg != NULL) {
539                 pr_debug("%s clock checking\n", clk_hw_get_name(hw));
540                 data = xgene_clk_read(pclk->param.csr_reg +
541                                         pclk->param.reg_clk_offset);
542                 pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
543                         data & pclk->param.reg_clk_mask ? "enabled" :
544                                                         "disabled");
545         }
546
547         if (pclk->param.csr_reg == NULL)
548                 return 1;
549         return data & pclk->param.reg_clk_mask ? 1 : 0;
550 }
551
552 static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
553                                 unsigned long parent_rate)
554 {
555         struct xgene_clk *pclk = to_xgene_clk(hw);
556         u32 data;
557
558         if (pclk->param.divider_reg) {
559                 data = xgene_clk_read(pclk->param.divider_reg +
560                                         pclk->param.reg_divider_offset);
561                 data >>= pclk->param.reg_divider_shift;
562                 data &= (1 << pclk->param.reg_divider_width) - 1;
563
564                 pr_debug("%s clock recalc rate %ld parent %ld\n",
565                         clk_hw_get_name(hw),
566                         parent_rate / data, parent_rate);
567
568                 return parent_rate / data;
569         } else {
570                 pr_debug("%s clock recalc rate %ld parent %ld\n",
571                         clk_hw_get_name(hw), parent_rate, parent_rate);
572                 return parent_rate;
573         }
574 }
575
576 static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
577                                 unsigned long parent_rate)
578 {
579         struct xgene_clk *pclk = to_xgene_clk(hw);
580         unsigned long flags = 0;
581         u32 data;
582         u32 divider;
583         u32 divider_save;
584
585         if (pclk->lock)
586                 spin_lock_irqsave(pclk->lock, flags);
587
588         if (pclk->param.divider_reg) {
589                 /* Let's compute the divider */
590                 if (rate > parent_rate)
591                         rate = parent_rate;
592                 divider_save = divider = parent_rate / rate; /* Rounded down */
593                 divider &= (1 << pclk->param.reg_divider_width) - 1;
594                 divider <<= pclk->param.reg_divider_shift;
595
596                 /* Set new divider */
597                 data = xgene_clk_read(pclk->param.divider_reg +
598                                 pclk->param.reg_divider_offset);
599                 data &= ~(((1 << pclk->param.reg_divider_width) - 1)
600                                 << pclk->param.reg_divider_shift);
601                 data |= divider;
602                 xgene_clk_write(data, pclk->param.divider_reg +
603                                         pclk->param.reg_divider_offset);
604                 pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
605                         parent_rate / divider_save);
606         } else {
607                 divider_save = 1;
608         }
609
610         if (pclk->lock)
611                 spin_unlock_irqrestore(pclk->lock, flags);
612
613         return parent_rate / divider_save;
614 }
615
616 static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
617                                 unsigned long *prate)
618 {
619         struct xgene_clk *pclk = to_xgene_clk(hw);
620         unsigned long parent_rate = *prate;
621         u32 divider;
622
623         if (pclk->param.divider_reg) {
624                 /* Let's compute the divider */
625                 if (rate > parent_rate)
626                         rate = parent_rate;
627                 divider = parent_rate / rate;   /* Rounded down */
628         } else {
629                 divider = 1;
630         }
631
632         return parent_rate / divider;
633 }
634
/* Full gate + divider ops for device (IP) clocks. */
static const struct clk_ops xgene_clk_ops = {
        .enable = xgene_clk_enable,
        .disable = xgene_clk_disable,
        .is_enabled = xgene_clk_is_enabled,
        .recalc_rate = xgene_clk_recalc_rate,
        .set_rate = xgene_clk_set_rate,
        .round_rate = xgene_clk_round_rate,
};
643
644 static struct clk *xgene_register_clk(struct device *dev,
645                 const char *name, const char *parent_name,
646                 struct xgene_dev_parameters *parameters, spinlock_t *lock)
647 {
648         struct xgene_clk *apmclk;
649         struct clk *clk;
650         struct clk_init_data init;
651         int rc;
652
653         /* allocate the APM clock structure */
654         apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
655         if (!apmclk) {
656                 pr_err("%s: could not allocate APM clk\n", __func__);
657                 return ERR_PTR(-ENOMEM);
658         }
659
660         init.name = name;
661         init.ops = &xgene_clk_ops;
662         init.flags = 0;
663         init.parent_names = parent_name ? &parent_name : NULL;
664         init.num_parents = parent_name ? 1 : 0;
665
666         apmclk->lock = lock;
667         apmclk->hw.init = &init;
668         apmclk->param = *parameters;
669
670         /* Register the clock */
671         clk = clk_register(dev, &apmclk->hw);
672         if (IS_ERR(clk)) {
673                 pr_err("%s: could not register clk %s\n", __func__, name);
674                 kfree(apmclk);
675                 return clk;
676         }
677
678         /* Register the clock for lookup */
679         rc = clk_register_clkdev(clk, name, NULL);
680         if (rc != 0) {
681                 pr_err("%s: could not register lookup clk %s\n",
682                         __func__, name);
683         }
684         return clk;
685 }
686
687 static void __init xgene_devclk_init(struct device_node *np)
688 {
689         const char *clk_name = np->full_name;
690         struct clk *clk;
691         struct resource res;
692         int rc;
693         struct xgene_dev_parameters parameters;
694         int i;
695
696         /* Check if the entry is disabled */
697         if (!of_device_is_available(np))
698                 return;
699
700         /* Parse the DTS register for resource */
701         parameters.csr_reg = NULL;
702         parameters.divider_reg = NULL;
703         for (i = 0; i < 2; i++) {
704                 void __iomem *map_res;
705                 rc = of_address_to_resource(np, i, &res);
706                 if (rc != 0) {
707                         if (i == 0) {
708                                 pr_err("no DTS register for %s\n",
709                                         np->full_name);
710                                 return;
711                         }
712                         break;
713                 }
714                 map_res = of_iomap(np, i);
715                 if (map_res == NULL) {
716                         pr_err("Unable to map resource %d for %s\n",
717                                 i, np->full_name);
718                         goto err;
719                 }
720                 if (strcmp(res.name, "div-reg") == 0)
721                         parameters.divider_reg = map_res;
722                 else /* if (strcmp(res->name, "csr-reg") == 0) */
723                         parameters.csr_reg = map_res;
724         }
725         if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
726                 parameters.reg_csr_offset = 0;
727         if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
728                 parameters.reg_csr_mask = 0xF;
729         if (of_property_read_u32(np, "enable-offset",
730                                 &parameters.reg_clk_offset))
731                 parameters.reg_clk_offset = 0x8;
732         if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
733                 parameters.reg_clk_mask = 0xF;
734         if (of_property_read_u32(np, "divider-offset",
735                                 &parameters.reg_divider_offset))
736                 parameters.reg_divider_offset = 0;
737         if (of_property_read_u32(np, "divider-width",
738                                 &parameters.reg_divider_width))
739                 parameters.reg_divider_width = 0;
740         if (of_property_read_u32(np, "divider-shift",
741                                 &parameters.reg_divider_shift))
742                 parameters.reg_divider_shift = 0;
743         of_property_read_string(np, "clock-output-names", &clk_name);
744
745         clk = xgene_register_clk(NULL, clk_name,
746                 of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
747         if (IS_ERR(clk))
748                 goto err;
749         pr_debug("Add %s clock\n", clk_name);
750         rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
751         if (rc != 0)
752                 pr_err("%s: could register provider clk %s\n", __func__,
753                         np->full_name);
754
755         return;
756
757 err:
758         if (parameters.csr_reg)
759                 iounmap(parameters.csr_reg);
760         if (parameters.divider_reg)
761                 iounmap(parameters.divider_reg);
762 }
763
/*
 * OF match entries. The v1 and v2 PLL compatibles share init functions;
 * xgene_pllclk_version() distinguishes the register layouts.
 */
CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
               xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
               xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);