drivers/clk/clk-xgene.c
/*
 * clk-xgene.c - AppliedMicro X-Gene Clock Interface
 *
 * Copyright (c) 2013, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>

/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src)                   ((src) & 0x000001ff)
#define SC_N_DIV_RD(src)                ((src) & 0x0000007f)
#define SC_OUTDIV2(src)                 (((src) & 0x00000100) >> 8)

/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src)                    (((src) & 0x07000000) >> 24)
#define CLKOD_RD(src)                   (((src) & 0x00300000) >> 20)
#define REGSPEC_RESET_F1_MASK           0x00010000
#define CLKF_RD(src)                    ((src) & 0x000001ff)

#define XGENE_CLK_DRIVER_VER            "0.1"

static DEFINE_SPINLOCK(clk_lock);

static inline u32 xgene_clk_read(void __iomem *csr)
{
        return readl_relaxed(csr);
}

static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
        writel_relaxed(data, csr);
}

/* PLL Clock */
enum xgene_pll_type {
        PLL_TYPE_PCP = 0,
        PLL_TYPE_SOC = 1,
};

struct xgene_clk_pll {
        struct clk_hw   hw;
        void __iomem    *reg;
        spinlock_t      *lock;
        u32             pll_offset;
        enum xgene_pll_type     type;
        int             version;
};

#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)

static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
{
        struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
        u32 data;

        data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
        pr_debug("%s pll %s\n", clk_hw_get_name(hw),
                data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");

        return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
}

static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
                                unsigned long parent_rate)
{
        struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
        unsigned long fref;
        unsigned long fvco;
        u32 pll;
        u32 nref;
        u32 nout;
        u32 nfb;

        pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);

        if (pllclk->version <= 1) {
                if (pllclk->type == PLL_TYPE_PCP) {
                        /*
                         * PLL VCO = Reference clock * NF
                         * PCP PLL = PLL_VCO / 2
                         */
                        nout = 2;
                        fvco = parent_rate * (N_DIV_RD(pll) + 4);
                } else {
                        /*
                         * Fref = Reference Clock / NREF;
                         * Fvco = Fref * NFB;
                         * Fout = Fvco / NOUT;
                         */
                        nref = CLKR_RD(pll) + 1;
                        nout = CLKOD_RD(pll) + 1;
                        nfb = CLKF_RD(pll);
                        fref = parent_rate / nref;
                        fvco = fref * nfb;
                }
        } else {
                /*
                 * fvco = Reference clock * FBDIVC
                 * PLL freq = fvco / NOUT
                 */
                nout = SC_OUTDIV2(pll) ? 2 : 3;
                fvco = parent_rate * SC_N_DIV_RD(pll);
        }
        pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
                 clk_hw_get_name(hw), fvco / nout, parent_rate,
                 pllclk->version);

        return fvco / nout;
}
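
/*
 * Worked example (illustrative register values, not taken from real
 * hardware): for a v1 SOC PLL with a 100 MHz reference, CLKR = 0
 * (NREF = 1), CLKF = 25 (NFB = 25) and CLKOD = 0 (NOUT = 1) give
 * Fref = 100 MHz, Fvco = 2500 MHz and an output of 2500 MHz.
 */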

static const struct clk_ops xgene_clk_pll_ops = {
        .is_enabled = xgene_clk_pll_is_enabled,
        .recalc_rate = xgene_clk_pll_recalc_rate,
};

static struct clk *xgene_register_clk_pll(struct device *dev,
        const char *name, const char *parent_name,
        unsigned long flags, void __iomem *reg, u32 pll_offset,
        u32 type, spinlock_t *lock, int version)
{
        struct xgene_clk_pll *apmclk;
        struct clk *clk;
        struct clk_init_data init;

        /* allocate the APM clock structure */
        apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
        if (!apmclk)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &xgene_clk_pll_ops;
        init.flags = flags;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        apmclk->version = version;
        apmclk->reg = reg;
        apmclk->lock = lock;
        apmclk->pll_offset = pll_offset;
        apmclk->type = type;
        apmclk->hw.init = &init;

        /* Register the clock */
        clk = clk_register(dev, &apmclk->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: could not register clk %s\n", __func__, name);
                kfree(apmclk);
                return clk;
        }
        return clk;
}

static int xgene_pllclk_version(struct device_node *np)
{
        if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
                return 1;
        if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
                return 1;
        return 2;
}
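
/*
 * The original ("v1") compatibles above decode the rate from N_DIV (PCP
 * PLL) or CLKR/CLKF/CLKOD (SOC PLL); the "-v2-" compatibles declared at
 * the bottom of this file use the SC_N_DIV/SC_OUTDIV2 layout instead.
 */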

static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
        const char *clk_name = np->full_name;
        struct clk *clk;
        void __iomem *reg;
        int version = xgene_pllclk_version(np);

        reg = of_iomap(np, 0);
        if (!reg) {
                pr_err("Unable to map CSR register for %pOF\n", np);
                return;
        }
        of_property_read_string(np, "clock-output-names", &clk_name);
        clk = xgene_register_clk_pll(NULL,
                        clk_name, of_clk_get_parent_name(np, 0),
                        0, reg, 0, pll_type, &clk_lock,
                        version);
        if (!IS_ERR(clk)) {
                of_clk_add_provider(np, of_clk_src_simple_get, clk);
                clk_register_clkdev(clk, clk_name, NULL);
                pr_debug("Add %s clock PLL\n", clk_name);
        }
}

static void xgene_socpllclk_init(struct device_node *np)
{
        xgene_pllclk_init(np, PLL_TYPE_SOC);
}

static void xgene_pcppllclk_init(struct device_node *np)
{
        xgene_pllclk_init(np, PLL_TYPE_PCP);
}

/**
 * struct xgene_clk_pmd - PMD clock
 *
 * @hw:         handle between common and hardware-specific interfaces
 * @reg:        register containing the fractional scale multiplier (scaler)
 * @shift:      shift to the unit bit field
 * @mask:       mask of the unit bit field
 * @denom:      1/denominator unit
 * @flags:      XGENE_CLK_PMD_* flags
 * @lock:       register lock
 * Flags:
 * XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
 *      from the register plus one. For example,
 *              0 for (0 + 1) / denom,
 *              1 for (1 + 1) / denom and etc.
 *      If this flag is set, it is
 *              0 for (denom - 0) / denom,
 *              1 for (denom - 1) / denom and etc.
 *
 */
struct xgene_clk_pmd {
        struct clk_hw   hw;
        void __iomem    *reg;
        u8              shift;
        u32             mask;
        u64             denom;
        u32             flags;
        spinlock_t      *lock;
};

#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)

#define XGENE_CLK_PMD_SCALE_INVERTED    BIT(0)
#define XGENE_CLK_PMD_SHIFT             8
#define XGENE_CLK_PMD_WIDTH             3

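/*
 * The scaler field is XGENE_CLK_PMD_WIDTH bits wide starting at
 * XGENE_CLK_PMD_SHIFT, and xgene_pmdclk_init() uses denom = 2^3 = 8 with
 * the inverted scale: a field value of 0 yields 8/8 of the parent rate,
 * 1 yields 7/8, and so on down to 7 yielding 1/8.
 */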
static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
                                               unsigned long parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        unsigned long flags = 0;
        u64 ret, scale;
        u32 val;

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        ret = (u64)parent_rate;

        scale = (val & fd->mask) >> fd->shift;
        if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
                scale = fd->denom - scale;
        else
                scale++;

        /* freq = parent_rate * scaler / denom */
        do_div(ret, fd->denom);
        ret *= scale;
        if (ret == 0)
                ret = (u64)parent_rate;

        return ret;
}

static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
                                     unsigned long *parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        u64 ret, scale;

        if (!rate || rate >= *parent_rate)
                return *parent_rate;

        /* freq = parent_rate * scaler / denom */
        ret = rate * fd->denom;
        scale = DIV_ROUND_UP_ULL(ret, *parent_rate);

        ret = (u64)*parent_rate * scale;
        do_div(ret, fd->denom);

        return ret;
}

static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        unsigned long flags = 0;
        u64 scale, ret;
        u32 val;

        /*
         * Compute the scaler:
         *
         * freq = parent_rate * scaler / denom, or
         * scaler = freq * denom / parent_rate
         */
        ret = rate * fd->denom;
        scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);

        /* Check if inverted */
        if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
                scale = fd->denom - scale;
        else
                scale--;

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);
        val &= ~fd->mask;
        val |= (scale << fd->shift);
        clk_writel(val, fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        return 0;
}
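
/*
 * Worked example (illustrative numbers): requesting 150 MHz from a
 * 200 MHz parent with denom = 8 gives scale = DIV_ROUND_UP(150 * 8, 200)
 * = 6; with XGENE_CLK_PMD_SCALE_INVERTED the field is written as
 * 8 - 6 = 2, which xgene_clk_pmd_recalc_rate() reads back as
 * 200 MHz * 6 / 8 = 150 MHz.
 */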

static const struct clk_ops xgene_clk_pmd_ops = {
        .recalc_rate = xgene_clk_pmd_recalc_rate,
        .round_rate = xgene_clk_pmd_round_rate,
        .set_rate = xgene_clk_pmd_set_rate,
};

static struct clk *
xgene_register_clk_pmd(struct device *dev,
                       const char *name, const char *parent_name,
                       unsigned long flags, void __iomem *reg, u8 shift,
                       u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
{
        struct xgene_clk_pmd *fd;
        struct clk_init_data init;
        struct clk *clk;

        fd = kzalloc(sizeof(*fd), GFP_KERNEL);
        if (!fd)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &xgene_clk_pmd_ops;
        init.flags = flags;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        fd->reg = reg;
        fd->shift = shift;
        fd->mask = (BIT(width) - 1) << shift;
        fd->denom = denom;
        fd->flags = clk_flags;
        fd->lock = lock;
        fd->hw.init = &init;

        clk = clk_register(dev, &fd->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: could not register clk %s\n", __func__, name);
                kfree(fd);
                return clk;
        }

        return clk;
}

static void xgene_pmdclk_init(struct device_node *np)
{
        const char *clk_name = np->full_name;
        void __iomem *csr_reg;
        struct resource res;
        struct clk *clk;
        u64 denom;
        u32 flags = 0;
        int rc;

        /* Check if the entry is disabled */
        if (!of_device_is_available(np))
                return;

        /* Parse the DTS register for resource */
        rc = of_address_to_resource(np, 0, &res);
        if (rc != 0) {
                pr_err("no DTS register for %pOF\n", np);
                return;
        }
        csr_reg = of_iomap(np, 0);
        if (!csr_reg) {
                pr_err("Unable to map resource for %pOF\n", np);
                return;
        }
        of_property_read_string(np, "clock-output-names", &clk_name);

        denom = BIT(XGENE_CLK_PMD_WIDTH);
        flags |= XGENE_CLK_PMD_SCALE_INVERTED;

        clk = xgene_register_clk_pmd(NULL, clk_name,
                                     of_clk_get_parent_name(np, 0), 0,
                                     csr_reg, XGENE_CLK_PMD_SHIFT,
                                     XGENE_CLK_PMD_WIDTH, denom,
                                     flags, &clk_lock);
        if (!IS_ERR(clk)) {
                of_clk_add_provider(np, of_clk_src_simple_get, clk);
                clk_register_clkdev(clk, clk_name, NULL);
                pr_debug("Add %s clock\n", clk_name);
        } else {
                if (csr_reg)
                        iounmap(csr_reg);
        }
}

/* IP Clock */
struct xgene_dev_parameters {
        void __iomem *csr_reg;          /* CSR for IP clock */
        u32 reg_clk_offset;             /* Offset to clock enable CSR */
        u32 reg_clk_mask;               /* Mask bit for clock enable */
        u32 reg_csr_offset;             /* Offset to CSR reset */
        u32 reg_csr_mask;               /* Mask bit for disable CSR reset */
        void __iomem *divider_reg;      /* CSR for divider */
        u32 reg_divider_offset;         /* Offset to divider register */
        u32 reg_divider_shift;          /* Bit shift to divider field */
        u32 reg_divider_width;          /* Width of the bit to divider field */
};

struct xgene_clk {
        struct clk_hw   hw;
        spinlock_t      *lock;
        struct xgene_dev_parameters     param;
};

#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)

static int xgene_clk_enable(struct clk_hw *hw)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;

        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);

        if (pclk->param.csr_reg) {
                pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
                /* First enable the clock */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                data |= pclk->param.reg_clk_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
                        clk_hw_get_name(hw),
                        pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
                        data);

                /* Second enable the CSR */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
                data &= ~pclk->param.reg_csr_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
                pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
                        clk_hw_get_name(hw),
                        pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
                        data);
        }

        if (pclk->lock)
                spin_unlock_irqrestore(pclk->lock, flags);

        return 0;
}

static void xgene_clk_disable(struct clk_hw *hw)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;

        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);

        if (pclk->param.csr_reg) {
                pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
                /* First put the CSR in reset */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
                data |= pclk->param.reg_csr_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);

                /* Second disable the clock */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                data &= ~pclk->param.reg_clk_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
        }

        if (pclk->lock)
                spin_unlock_irqrestore(pclk->lock, flags);
}

static int xgene_clk_is_enabled(struct clk_hw *hw)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        u32 data = 0;

        if (pclk->param.csr_reg) {
                pr_debug("%s clock checking\n", clk_hw_get_name(hw));
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
                        data & pclk->param.reg_clk_mask ? "enabled" :
                                                        "disabled");
        }

        if (!pclk->param.csr_reg)
                return 1;
        return data & pclk->param.reg_clk_mask ? 1 : 0;
}

static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
                                unsigned long parent_rate)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        u32 data;

        if (pclk->param.divider_reg) {
                data = xgene_clk_read(pclk->param.divider_reg +
                                        pclk->param.reg_divider_offset);
                data >>= pclk->param.reg_divider_shift;
                data &= (1 << pclk->param.reg_divider_width) - 1;

                pr_debug("%s clock recalc rate %ld parent %ld\n",
                        clk_hw_get_name(hw),
                        parent_rate / data, parent_rate);

                return parent_rate / data;
        } else {
                pr_debug("%s clock recalc rate %ld parent %ld\n",
                        clk_hw_get_name(hw), parent_rate, parent_rate);
                return parent_rate;
        }
}

static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;
        u32 divider;
        u32 divider_save;

        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);

        if (pclk->param.divider_reg) {
                /* Let's compute the divider */
                if (rate > parent_rate)
                        rate = parent_rate;
                divider_save = divider = parent_rate / rate; /* Rounded down */
                divider &= (1 << pclk->param.reg_divider_width) - 1;
                divider <<= pclk->param.reg_divider_shift;

                /* Set new divider */
                data = xgene_clk_read(pclk->param.divider_reg +
                                pclk->param.reg_divider_offset);
                data &= ~(((1 << pclk->param.reg_divider_width) - 1)
                                << pclk->param.reg_divider_shift);
                data |= divider;
                xgene_clk_write(data, pclk->param.divider_reg +
                                        pclk->param.reg_divider_offset);
                pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
                        parent_rate / divider_save);
        } else {
                divider_save = 1;
        }

        if (pclk->lock)
                spin_unlock_irqrestore(pclk->lock, flags);

        return parent_rate / divider_save;
}

static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long *prate)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long parent_rate = *prate;
        u32 divider;

        if (pclk->param.divider_reg) {
                /* Let's compute the divider */
                if (rate > parent_rate)
                        rate = parent_rate;
                divider = parent_rate / rate;   /* Rounded down */
        } else {
                divider = 1;
        }

        return parent_rate / divider;
}
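
/*
 * The divider is truncated, so round_rate() can return a rate above the
 * request: e.g. asking for 40 MHz from a 100 MHz parent gives
 * divider = 2 and a resulting rate of 50 MHz.
 */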

static const struct clk_ops xgene_clk_ops = {
        .enable = xgene_clk_enable,
        .disable = xgene_clk_disable,
        .is_enabled = xgene_clk_is_enabled,
        .recalc_rate = xgene_clk_recalc_rate,
        .set_rate = xgene_clk_set_rate,
        .round_rate = xgene_clk_round_rate,
};

static struct clk *xgene_register_clk(struct device *dev,
                const char *name, const char *parent_name,
                struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
        struct xgene_clk *apmclk;
        struct clk *clk;
        struct clk_init_data init;
        int rc;

        /* allocate the APM clock structure */
        apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
        if (!apmclk)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &xgene_clk_ops;
        init.flags = 0;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        apmclk->lock = lock;
        apmclk->hw.init = &init;
        apmclk->param = *parameters;

        /* Register the clock */
        clk = clk_register(dev, &apmclk->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: could not register clk %s\n", __func__, name);
                kfree(apmclk);
                return clk;
        }

        /* Register the clock for lookup */
        rc = clk_register_clkdev(clk, name, NULL);
        if (rc != 0) {
                pr_err("%s: could not register lookup clk %s\n",
                        __func__, name);
        }
        return clk;
}

static void __init xgene_devclk_init(struct device_node *np)
{
        const char *clk_name = np->full_name;
        struct clk *clk;
        struct resource res;
        int rc;
        struct xgene_dev_parameters parameters;
        int i;

        /* Check if the entry is disabled */
        if (!of_device_is_available(np))
                return;

        /* Parse the DTS register for resource */
        parameters.csr_reg = NULL;
        parameters.divider_reg = NULL;
        for (i = 0; i < 2; i++) {
                void __iomem *map_res;
                rc = of_address_to_resource(np, i, &res);
                if (rc != 0) {
                        if (i == 0) {
                                pr_err("no DTS register for %pOF\n", np);
                                return;
                        }
                        break;
                }
                map_res = of_iomap(np, i);
                if (!map_res) {
                        pr_err("Unable to map resource %d for %pOF\n", i, np);
                        goto err;
                }
                if (strcmp(res.name, "div-reg") == 0)
                        parameters.divider_reg = map_res;
                else /* if (strcmp(res.name, "csr-reg") == 0) */
                        parameters.csr_reg = map_res;
        }
        if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
                parameters.reg_csr_offset = 0;
        if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
                parameters.reg_csr_mask = 0xF;
        if (of_property_read_u32(np, "enable-offset",
                                &parameters.reg_clk_offset))
                parameters.reg_clk_offset = 0x8;
        if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
                parameters.reg_clk_mask = 0xF;
        if (of_property_read_u32(np, "divider-offset",
                                &parameters.reg_divider_offset))
                parameters.reg_divider_offset = 0;
        if (of_property_read_u32(np, "divider-width",
                                &parameters.reg_divider_width))
                parameters.reg_divider_width = 0;
        if (of_property_read_u32(np, "divider-shift",
                                &parameters.reg_divider_shift))
                parameters.reg_divider_shift = 0;
        of_property_read_string(np, "clock-output-names", &clk_name);

        clk = xgene_register_clk(NULL, clk_name,
                of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
        if (IS_ERR(clk))
                goto err;
        pr_debug("Add %s clock\n", clk_name);
        rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (rc != 0)
                pr_err("%s: could not register provider clk %pOF\n",
                       __func__, np);

        return;

err:
        if (parameters.csr_reg)
                iounmap(parameters.csr_reg);
        if (parameters.divider_reg)
                iounmap(parameters.divider_reg);
}
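
/*
 * An illustrative device-tree node for such a device clock (the name,
 * address and mask values below are examples only, not taken from a
 * real board file):
 *
 *      ethclk: ethclk@17000000 {
 *              compatible = "apm,xgene-device-clock";
 *              #clock-cells = <1>;
 *              clocks = <&socplldiv2 0>;
 *              reg = <0x0 0x17000000 0x0 0x1000>;
 *              reg-names = "csr-reg";
 *              csr-mask = <0x3>;
 *              clock-output-names = "ethclk";
 *      };
 */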

CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
               xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
               xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
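
/*
 * Consumer-side sketch (not part of this driver): a peripheral driver
 * whose node references one of the clocks above would typically do
 *
 *      struct clk *clk = devm_clk_get(&pdev->dev, NULL);
 *
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *      return clk_prepare_enable(clk);
 *
 * which reaches xgene_clk_enable() through the clk_ops registered here.
 */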