// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "cgu.h"

#define MHZ (1000 * 1000)

/**
 * ingenic_cgu_gate_get() - get the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the clock is gated, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
                     const struct ingenic_cgu_gate_info *info)
{
        return !!(readl(cgu->base + info->reg) & BIT(info->bit))
                ^ info->clear_to_gate;
}

/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: true to gate the clock, false to ungate it
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
                     const struct ingenic_cgu_gate_info *info, bool val)
{
        u32 clkgr = readl(cgu->base + info->reg);

        if (val ^ info->clear_to_gate)
                clkgr |= BIT(info->bit);
        else
                clkgr &= ~BIT(info->bit);

        writel(clkgr, cgu->base + info->reg);
}
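
/*
 * For illustration (not part of the upstream file): clear_to_gate inverts the
 * sense of the gate bit. Calling ingenic_cgu_gate_set(cgu, info, true) clears
 * the bit when info->clear_to_gate is true and sets it otherwise; in both
 * cases the clock ends up gated, and ingenic_cgu_gate_get() then returns true.
 */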

/*
 * PLL operations
 */

static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        const struct ingenic_cgu_pll_info *pll_info;
        unsigned m, n, od_enc, od;
        bool bypass;
        unsigned long flags;
        u32 ctl;

        clk_info = &cgu->clock_info[ingenic_clk->idx];
        BUG_ON(clk_info->type != CGU_CLK_PLL);
        pll_info = &clk_info->pll;

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);
        spin_unlock_irqrestore(&cgu->lock, flags);

        m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
        m += pll_info->m_offset;
        n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
        n += pll_info->n_offset;
        od_enc = ctl >> pll_info->od_shift;
        od_enc &= GENMASK(pll_info->od_bits - 1, 0);
        bypass = !pll_info->no_bypass_bit &&
                 !!(ctl & BIT(pll_info->bypass_bit));

        if (bypass)
                return parent_rate;

        for (od = 0; od < pll_info->od_max; od++) {
                if (pll_info->od_encoding[od] == od_enc)
                        break;
        }
        BUG_ON(od == pll_info->od_max);
        od++;

        return div_u64((u64)parent_rate * m, n * od);
}

static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
                 unsigned long rate, unsigned long parent_rate,
                 unsigned *pm, unsigned *pn, unsigned *pod)
{
        const struct ingenic_cgu_pll_info *pll_info;
        unsigned m, n, od;

        pll_info = &clk_info->pll;
        od = 1;

        /*
         * The frequency after the input divider must be between 10 and 50 MHz.
         * The highest divider yields the best resolution.
         */
        n = parent_rate / (10 * MHZ);
        n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
        n = max_t(unsigned, n, pll_info->n_offset);

        m = (rate / MHZ) * od * n / (parent_rate / MHZ);
        m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
        m = max_t(unsigned, m, pll_info->m_offset);

        if (pm)
                *pm = m;
        if (pn)
                *pn = n;
        if (pod)
                *pod = od;

        return div_u64((u64)parent_rate * m, n * od);
}
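
/*
 * Worked example (illustrative only, assuming hypothetical m/n offsets of
 * zero): for a 12 MHz parent and a requested rate of 1200 MHz the code above
 * gives n = 12 MHz / 10 MHz = 1, m = (1200 * 1 * 1) / 12 = 100 and od = 1,
 * so the returned rate is 12 MHz * 100 / (1 * 1) = 1200 MHz and the request
 * is met exactly. Because m is computed with truncating integer arithmetic,
 * other requests may come out slightly low; ingenic_pll_set_rate() logs such
 * a mismatch via pr_info().
 */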

static inline const struct ingenic_cgu_clk_info *to_clk_info(
                struct ingenic_clk *ingenic_clk)
{
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;

        clk_info = &cgu->clock_info[ingenic_clk->idx];
        BUG_ON(clk_info->type != CGU_CLK_PLL);

        return clk_info;
}

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
                       unsigned long *prate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

        return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}

static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
                     unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        unsigned long rate, flags;
        unsigned int m, n, od;
        u32 ctl;

        rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
                                &m, &n, &od);
        if (rate != req_rate)
                pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
                        clk_info->name, req_rate, rate);

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);

        ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
        ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

        ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
        ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

        ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
        ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

        writel(ctl, cgu->base + pll_info->reg);
        spin_unlock_irqrestore(&cgu->lock, flags);

        return 0;
}

static int ingenic_pll_enable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        const unsigned int timeout = 100;
        unsigned long flags;
        unsigned int i;
        u32 ctl;

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);

        ctl &= ~BIT(pll_info->bypass_bit);
        ctl |= BIT(pll_info->enable_bit);

        writel(ctl, cgu->base + pll_info->reg);

        /* wait for the PLL to stabilise */
        for (i = 0; i < timeout; i++) {
                ctl = readl(cgu->base + pll_info->reg);
                if (ctl & BIT(pll_info->stable_bit))
                        break;
                mdelay(1);
        }

        spin_unlock_irqrestore(&cgu->lock, flags);

        if (i == timeout)
                return -EBUSY;

        return 0;
}

static void ingenic_pll_disable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        unsigned long flags;
        u32 ctl;

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);

        ctl &= ~BIT(pll_info->enable_bit);

        writel(ctl, cgu->base + pll_info->reg);
        spin_unlock_irqrestore(&cgu->lock, flags);
}

static int ingenic_pll_is_enabled(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        unsigned long flags;
        u32 ctl;

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);
        spin_unlock_irqrestore(&cgu->lock, flags);

        return !!(ctl & BIT(pll_info->enable_bit));
}

static const struct clk_ops ingenic_pll_ops = {
        .recalc_rate = ingenic_pll_recalc_rate,
        .round_rate = ingenic_pll_round_rate,
        .set_rate = ingenic_pll_set_rate,

        .enable = ingenic_pll_enable,
        .disable = ingenic_pll_disable,
        .is_enabled = ingenic_pll_is_enabled,
};

/*
 * Operations for all non-PLL clocks
 */

static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        u32 reg;
        u8 i, hw_idx, idx = 0;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_MUX) {
                reg = readl(cgu->base + clk_info->mux.reg);
                hw_idx = (reg >> clk_info->mux.shift) &
                         GENMASK(clk_info->mux.bits - 1, 0);

                /*
                 * Convert the hardware index to the parent index by skipping
                 * over any -1's in the parents array.
                 */
                for (i = 0; i < hw_idx; i++) {
                        if (clk_info->parents[i] != -1)
                                idx++;
                }
        }

        return idx;
}

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        unsigned long flags;
        u8 curr_idx, hw_idx, num_poss;
        u32 reg, mask;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_MUX) {
                /*
                 * Convert the parent index to the hardware index by adding
                 * 1 for any -1 in the parents array preceding the given
                 * index. That is, we want the index of idx'th entry in
                 * clk_info->parents which does not equal -1.
                 */
                hw_idx = curr_idx = 0;
                num_poss = 1 << clk_info->mux.bits;
                for (; hw_idx < num_poss; hw_idx++) {
                        if (clk_info->parents[hw_idx] == -1)
                                continue;
                        if (curr_idx == idx)
                                break;
                        curr_idx++;
                }

                /* idx should always be a valid parent */
                BUG_ON(curr_idx != idx);

                mask = GENMASK(clk_info->mux.bits - 1, 0);
                mask <<= clk_info->mux.shift;

                spin_lock_irqsave(&cgu->lock, flags);

                /* write the register */
                reg = readl(cgu->base + clk_info->mux.reg);
                reg &= ~mask;
                reg |= hw_idx << clk_info->mux.shift;
                writel(reg, cgu->base + clk_info->mux.reg);

                spin_unlock_irqrestore(&cgu->lock, flags);
                return 0;
        }

        return idx ? -EINVAL : 0;
}
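
/*
 * Illustration (not part of the upstream file): with a hypothetical parents
 * array of { CLK_A, -1, CLK_B, CLK_C }, hardware mux values 0, 2 and 3 map
 * to parent indices 0, 1 and 2 respectively. ingenic_clk_get_parent() counts
 * the usable entries below the hardware value, while ingenic_clk_set_parent()
 * walks the array to the idx'th usable entry to recover the hardware value
 * it must program.
 */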

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        unsigned long rate = parent_rate;
        u32 div_reg, div;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_DIV) {
                div_reg = readl(cgu->base + clk_info->div.reg);
                div = (div_reg >> clk_info->div.shift) &
                      GENMASK(clk_info->div.bits - 1, 0);
                div += 1;
                div *= clk_info->div.div;

                rate /= div;
        } else if (clk_info->type & CGU_CLK_FIXDIV) {
                rate /= clk_info->fixdiv.div;
        }

        return rate;
}

static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
                     unsigned long parent_rate, unsigned long req_rate)
{
        unsigned div;

        /* calculate the divide */
        div = DIV_ROUND_UP(parent_rate, req_rate);

        /* and impose hardware constraints */
        div = min_t(unsigned, div, 1 << clk_info->div.bits);
        div = max_t(unsigned, div, 1);

        /*
         * If the divider value itself must be divided before being written to
         * the divider register, we must ensure we don't have any bits set that
         * would be lost as a result of doing so.
         */
        div /= clk_info->div.div;
        div *= clk_info->div.div;

        return div;
}
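
/*
 * Worked example (illustrative only): with a 48 MHz parent, a 10 MHz request
 * and div.bits = 4, the code above computes DIV_ROUND_UP(48, 10) = 5, which
 * already lies within [1, 16]. If div.div is 1 the divider stays 5 and the
 * resulting rate is 9.6 MHz; if div.div is 2 the value is rounded down to a
 * multiple of div.div (4) so that it stays representable, giving 12 MHz.
 */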

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
                       unsigned long *parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        unsigned int div = 1;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_DIV)
                div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
        else if (clk_info->type & CGU_CLK_FIXDIV)
                div = clk_info->fixdiv.div;

        return DIV_ROUND_UP(*parent_rate, div);
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
                     unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        const unsigned timeout = 100;
        unsigned long rate, flags;
        unsigned div, i;
        u32 reg, mask;
        int ret = 0;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_DIV) {
                div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
                rate = DIV_ROUND_UP(parent_rate, div);

                if (rate != req_rate)
                        return -EINVAL;

                spin_lock_irqsave(&cgu->lock, flags);
                reg = readl(cgu->base + clk_info->div.reg);

                /* update the divide */
                mask = GENMASK(clk_info->div.bits - 1, 0);
                reg &= ~(mask << clk_info->div.shift);
                reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;

                /* clear the stop bit */
                if (clk_info->div.stop_bit != -1)
                        reg &= ~BIT(clk_info->div.stop_bit);

                /* set the change enable bit */
                if (clk_info->div.ce_bit != -1)
                        reg |= BIT(clk_info->div.ce_bit);

                /* update the hardware */
                writel(reg, cgu->base + clk_info->div.reg);

                /* wait for the change to take effect */
                if (clk_info->div.busy_bit != -1) {
                        for (i = 0; i < timeout; i++) {
                                reg = readl(cgu->base + clk_info->div.reg);
                                if (!(reg & BIT(clk_info->div.busy_bit)))
                                        break;
                                mdelay(1);
                        }
                        if (i == timeout)
                                ret = -EBUSY;
                }

                spin_unlock_irqrestore(&cgu->lock, flags);
                return ret;
        }

        return -EINVAL;
}

static int ingenic_clk_enable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        unsigned long flags;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_GATE) {
                /* ungate the clock */
                spin_lock_irqsave(&cgu->lock, flags);
                ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
                spin_unlock_irqrestore(&cgu->lock, flags);

                if (clk_info->gate.delay_us)
                        udelay(clk_info->gate.delay_us);
        }

        return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        unsigned long flags;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_GATE) {
                /* gate the clock */
                spin_lock_irqsave(&cgu->lock, flags);
                ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
                spin_unlock_irqrestore(&cgu->lock, flags);
        }
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info;
        unsigned long flags;
        int enabled = 1;

        clk_info = &cgu->clock_info[ingenic_clk->idx];

        if (clk_info->type & CGU_CLK_GATE) {
                spin_lock_irqsave(&cgu->lock, flags);
                enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
                spin_unlock_irqrestore(&cgu->lock, flags);
        }

        return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
        .get_parent = ingenic_clk_get_parent,
        .set_parent = ingenic_clk_set_parent,

        .recalc_rate = ingenic_clk_recalc_rate,
        .round_rate = ingenic_clk_round_rate,
        .set_rate = ingenic_clk_set_rate,

        .enable = ingenic_clk_enable,
        .disable = ingenic_clk_disable,
        .is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */

static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
        const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
        struct clk_init_data clk_init;
        struct ingenic_clk *ingenic_clk = NULL;
        struct clk *clk, *parent;
        const char *parent_names[4];
        unsigned caps, i, num_possible;
        int err = -EINVAL;

        BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

        if (clk_info->type == CGU_CLK_EXT) {
                clk = of_clk_get_by_name(cgu->np, clk_info->name);
                if (IS_ERR(clk)) {
                        pr_err("%s: no external clock '%s' provided\n",
                               __func__, clk_info->name);
                        err = -ENODEV;
                        goto out;
                }
                err = clk_register_clkdev(clk, clk_info->name, NULL);
                if (err) {
                        clk_put(clk);
                        goto out;
                }
                cgu->clocks.clks[idx] = clk;
                return 0;
        }

        if (!clk_info->type) {
                pr_err("%s: no clock type specified for '%s'\n", __func__,
                       clk_info->name);
                goto out;
        }

        ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
        if (!ingenic_clk) {
                err = -ENOMEM;
                goto out;
        }

        ingenic_clk->hw.init = &clk_init;
        ingenic_clk->cgu = cgu;
        ingenic_clk->idx = idx;

        clk_init.name = clk_info->name;
        clk_init.flags = 0;
        clk_init.parent_names = parent_names;

        caps = clk_info->type;

        if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
                clk_init.num_parents = 0;

                if (caps & CGU_CLK_MUX)
                        num_possible = 1 << clk_info->mux.bits;
                else
                        num_possible = ARRAY_SIZE(clk_info->parents);

                for (i = 0; i < num_possible; i++) {
                        if (clk_info->parents[i] == -1)
                                continue;

                        parent = cgu->clocks.clks[clk_info->parents[i]];
                        parent_names[clk_init.num_parents] =
                                __clk_get_name(parent);
                        clk_init.num_parents++;
                }

                BUG_ON(!clk_init.num_parents);
                BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
        } else {
                BUG_ON(clk_info->parents[0] == -1);
                clk_init.num_parents = 1;
                parent = cgu->clocks.clks[clk_info->parents[0]];
                parent_names[0] = __clk_get_name(parent);
        }

        if (caps & CGU_CLK_CUSTOM) {
                clk_init.ops = clk_info->custom.clk_ops;

                caps &= ~CGU_CLK_CUSTOM;

                if (caps) {
                        pr_err("%s: custom clock may not be combined with type 0x%x\n",
                               __func__, caps);
                        goto out;
                }
        } else if (caps & CGU_CLK_PLL) {
                clk_init.ops = &ingenic_pll_ops;
                clk_init.flags |= CLK_SET_RATE_GATE;

                caps &= ~CGU_CLK_PLL;

                if (caps) {
                        pr_err("%s: PLL may not be combined with type 0x%x\n",
                               __func__, caps);
                        goto out;
                }
        } else {
                clk_init.ops = &ingenic_clk_ops;
        }

        /* nothing to do for gates or fixed dividers */
        caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

        if (caps & CGU_CLK_MUX) {
                if (!(caps & CGU_CLK_MUX_GLITCHFREE))
                        clk_init.flags |= CLK_SET_PARENT_GATE;

                caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
        }

        if (caps & CGU_CLK_DIV) {
                caps &= ~CGU_CLK_DIV;
        } else {
                /* pass rate changes to the parent clock */
                clk_init.flags |= CLK_SET_RATE_PARENT;
        }

        if (caps) {
                pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
                goto out;
        }

        clk = clk_register(NULL, &ingenic_clk->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: failed to register clock '%s'\n", __func__,
                       clk_info->name);
                err = PTR_ERR(clk);
                goto out;
        }

        err = clk_register_clkdev(clk, clk_info->name, NULL);
        if (err)
                goto out;

        cgu->clocks.clks[idx] = clk;
out:
        if (err)
                kfree(ingenic_clk);
        return err;
}

struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
                unsigned num_clocks, struct device_node *np)
{
        struct ingenic_cgu *cgu;

        cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
        if (!cgu)
                goto err_out;

        cgu->base = of_iomap(np, 0);
        if (!cgu->base) {
                pr_err("%s: failed to map CGU registers\n", __func__);
                goto err_out_free;
        }

        cgu->np = np;
        cgu->clock_info = clock_info;
        cgu->clocks.clk_num = num_clocks;

        spin_lock_init(&cgu->lock);

        return cgu;

err_out_free:
        kfree(cgu);
err_out:
        return NULL;
}

int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
        unsigned i;
        int err;

        cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
                                   GFP_KERNEL);
        if (!cgu->clocks.clks) {
                err = -ENOMEM;
                goto err_out;
        }

        for (i = 0; i < cgu->clocks.clk_num; i++) {
                err = ingenic_register_clock(cgu, i);
                if (err)
                        goto err_out_unregister;
        }

        err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
                                  &cgu->clocks);
        if (err)
                goto err_out_unregister;

        return 0;

err_out_unregister:
        for (i = 0; i < cgu->clocks.clk_num; i++) {
                if (!cgu->clocks.clks[i])
                        continue;
                if (cgu->clock_info[i].type & CGU_CLK_EXT)
                        clk_put(cgu->clocks.clks[i]);
                else
                        clk_unregister(cgu->clocks.clks[i]);
        }
        kfree(cgu->clocks.clks);
err_out:
        return err;
}
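
/*
 * Usage sketch (illustrative only, with hypothetical jz47xx_* names and
 * compatible string): a SoC-specific driver describes its clocks in an
 * ingenic_cgu_clk_info array and registers them from an of_clk init
 * callback, roughly as follows:
 *
 *     static void __init jz47xx_cgu_init(struct device_node *np)
 *     {
 *             struct ingenic_cgu *cgu;
 *             int retval;
 *
 *             cgu = ingenic_cgu_new(jz47xx_cgu_clocks,
 *                                   ARRAY_SIZE(jz47xx_cgu_clocks), np);
 *             if (!cgu) {
 *                     pr_err("%s: failed to initialise CGU\n", __func__);
 *                     return;
 *             }
 *
 *             retval = ingenic_cgu_register_clocks(cgu);
 *             if (retval)
 *                     pr_err("%s: failed to register CGU clocks\n", __func__);
 *     }
 *     CLK_OF_DECLARE_DRIVER(jz47xx_cgu, "ingenic,jz47xx-cgu", jz47xx_cgu_init);
 */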