// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car Gen3 Clock Pulse Generator
 *
 * Copyright (C) 2015-2018 Glider bvba
 * Copyright (C) 2019 Renesas Electronics Corp.
 *
 * Based on clk-rcar-gen3.c
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 */
#include <linux/bug.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
25 #include "renesas-cpg-mssr.h"
26 #include "rcar-gen3-cpg.h"
/* Offsets of the configurable PLL multiplier control registers in the CPG */
#define CPG_PLL0CR 0x00d8
#define CPG_PLL2CR 0x002c
#define CPG_PLL4CR 0x01f4
#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
/* Serializes all read-modify-write accesses to shared CPG registers */
static spinlock_t cpg_lock;
36 static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
41 spin_lock_irqsave(&cpg_lock, flags);
46 spin_unlock_irqrestore(&cpg_lock, flags);
49 struct cpg_simple_notifier {
50 struct notifier_block nb;
55 static int cpg_simple_notifier_call(struct notifier_block *nb,
56 unsigned long action, void *data)
58 struct cpg_simple_notifier *csn =
59 container_of(nb, struct cpg_simple_notifier, nb);
62 case PM_EVENT_SUSPEND:
63 csn->saved = readl(csn->reg);
67 writel(csn->saved, csn->reg);
73 static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
74 struct cpg_simple_notifier *csn)
76 csn->nb.notifier_call = cpg_simple_notifier_call;
77 raw_notifier_chain_register(notifiers, &csn->nb);
/*
 * Z Clock & Z2 Clock
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable.  clk->rate = (parent->rate * mult / 32 ) / 2
 * parent - fixed parent.  No clk_set_parent support
 */
89 #define CPG_FRQCRB 0x00000004
90 #define CPG_FRQCRB_KICK BIT(31)
91 #define CPG_FRQCRC 0x000000e0
96 void __iomem *kick_reg;
98 unsigned int fixed_div;
101 #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
103 static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
104 unsigned long parent_rate)
106 struct cpg_z_clk *zclk = to_z_clk(hw);
110 val = readl(zclk->reg) & zclk->mask;
111 mult = 32 - (val >> __ffs(zclk->mask));
113 return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
114 32 * zclk->fixed_div);
117 static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
118 unsigned long *parent_rate)
120 struct cpg_z_clk *zclk = to_z_clk(hw);
124 prate = *parent_rate / zclk->fixed_div;
125 mult = div_u64(rate * 32ULL, prate);
126 mult = clamp(mult, 1U, 32U);
128 return (u64)prate * mult / 32;
131 static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
132 unsigned long parent_rate)
134 struct cpg_z_clk *zclk = to_z_clk(hw);
138 mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
140 mult = clamp(mult, 1U, 32U);
142 if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
145 cpg_reg_modify(zclk->reg, zclk->mask,
146 ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);
149 * Set KICK bit in FRQCRB to update hardware setting and wait for
150 * clock change completion.
152 cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
155 * Note: There is no HW information about the worst case latency.
157 * Using experimental measurements, it seems that no more than
158 * ~10 iterations are needed, independently of the CPU rate.
159 * Since this value might be dependent of external xtal rate, pll1
160 * rate or even the other emulation clocks rate, use 1000 as a
161 * "super" safe value.
163 for (i = 1000; i; i--) {
164 if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
173 static const struct clk_ops cpg_z_clk_ops = {
174 .recalc_rate = cpg_z_clk_recalc_rate,
175 .round_rate = cpg_z_clk_round_rate,
176 .set_rate = cpg_z_clk_set_rate,
179 static struct clk * __init cpg_z_clk_register(const char *name,
180 const char *parent_name,
185 struct clk_init_data init;
186 struct cpg_z_clk *zclk;
189 zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
191 return ERR_PTR(-ENOMEM);
194 init.ops = &cpg_z_clk_ops;
196 init.parent_names = &parent_name;
197 init.num_parents = 1;
199 zclk->reg = reg + CPG_FRQCRC;
200 zclk->kick_reg = reg + CPG_FRQCRB;
201 zclk->hw.init = &init;
202 zclk->mask = GENMASK(offset + 4, offset);
203 zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
205 clk = clk_register(NULL, &zclk->hw);
#define CPG_SD_STP_HCK		BIT(9)
#define CPG_SD_STP_CK		BIT(8)

#define CPG_SD_STP_MASK		(CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK		(0x7 << 2 | 0x3 << 0)

/* Build one sd_div_table entry from stop bits and the two divider fields */
#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
{ \
	.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
	       ((stp_ck) ? CPG_SD_STP_CK : 0) | \
	       ((sd_srcfc) << 2) | \
	       ((sd_fc) << 0), \
	.div = (sd_div), \
}
230 struct sd_div_table {
237 const struct sd_div_table *div_table;
238 struct cpg_simple_notifier csn;
239 unsigned int div_num;
240 unsigned int cur_div_idx;
/* SDn divider
 *           sd_srcfc   sd_fc   div
 * stp_hck   stp_ck     (div)   (div)     = sd_srcfc x sd_fc
 *-------------------------------------------------------------------
 *  0         0          0 (1)   1 (4)      4 : SDR104 / HS200 / HS400 (8 TAP)
 *  0         0          1 (2)   1 (4)      8 : SDR50
 *  1         0          2 (4)   1 (4)     16 : HS / SDR25
 *  1         0          3 (8)   1 (4)     32 : NS / SDR12
 *  1         0          4 (16)  1 (4)     64
 *  0         0          0 (1)   0 (2)      2
 *  0         0          1 (2)   0 (2)      4 : SDR104 / HS200 / HS400 (4 TAP)
 *  1         0          2 (4)   0 (2)      8
 *  1         0          3 (8)   0 (2)     16
 *  1         0          4 (16)  0 (2)     32
 *
 * NOTE: There is a quirk option to ignore the first row of the dividers
 * table when searching for suitable settings. This is because HS400 on
 * early ES versions of H3 and M3-W requires a specific setting to work.
 */
262 static const struct sd_div_table cpg_sd_div_table[] = {
263 /* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
264 CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
265 CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
266 CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
267 CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
268 CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
269 CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
270 CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
271 CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
272 CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
273 CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
276 #define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
278 static int cpg_sd_clock_enable(struct clk_hw *hw)
280 struct sd_clock *clock = to_sd_clock(hw);
282 cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
283 clock->div_table[clock->cur_div_idx].val &
289 static void cpg_sd_clock_disable(struct clk_hw *hw)
291 struct sd_clock *clock = to_sd_clock(hw);
293 cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
296 static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
298 struct sd_clock *clock = to_sd_clock(hw);
300 return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
303 static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
304 unsigned long parent_rate)
306 struct sd_clock *clock = to_sd_clock(hw);
308 return DIV_ROUND_CLOSEST(parent_rate,
309 clock->div_table[clock->cur_div_idx].div);
312 static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
314 unsigned long parent_rate)
316 unsigned long calc_rate, diff, diff_min = ULONG_MAX;
317 unsigned int i, best_div = 0;
319 for (i = 0; i < clock->div_num; i++) {
320 calc_rate = DIV_ROUND_CLOSEST(parent_rate,
321 clock->div_table[i].div);
322 diff = calc_rate > rate ? calc_rate - rate : rate - calc_rate;
323 if (diff < diff_min) {
324 best_div = clock->div_table[i].div;
/* Report the rate produced by the closest achievable divider. */
static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long *parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);

	return DIV_ROUND_CLOSEST(*parent_rate, div);
}
341 static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
342 unsigned long parent_rate)
344 struct sd_clock *clock = to_sd_clock(hw);
345 unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
348 for (i = 0; i < clock->div_num; i++)
349 if (div == clock->div_table[i].div)
352 if (i >= clock->div_num)
355 clock->cur_div_idx = i;
357 cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
358 clock->div_table[i].val &
359 (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
364 static const struct clk_ops cpg_sd_clock_ops = {
365 .enable = cpg_sd_clock_enable,
366 .disable = cpg_sd_clock_disable,
367 .is_enabled = cpg_sd_clock_is_enabled,
368 .recalc_rate = cpg_sd_clock_recalc_rate,
369 .round_rate = cpg_sd_clock_round_rate,
370 .set_rate = cpg_sd_clock_set_rate,
/* Quirk flags for the running SoC revision, set once in rcar_gen3_cpg_init() */
static u32 cpg_quirks __initdata;

#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
379 static struct clk * __init cpg_sd_clk_register(const char *name,
380 void __iomem *base, unsigned int offset, const char *parent_name,
381 struct raw_notifier_head *notifiers)
383 struct clk_init_data init;
384 struct sd_clock *clock;
388 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
390 return ERR_PTR(-ENOMEM);
393 init.ops = &cpg_sd_clock_ops;
394 init.flags = CLK_SET_RATE_PARENT;
395 init.parent_names = &parent_name;
396 init.num_parents = 1;
398 clock->csn.reg = base + offset;
399 clock->hw.init = &init;
400 clock->div_table = cpg_sd_div_table;
401 clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
403 if (cpg_quirks & SD_SKIP_FIRST) {
408 val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
409 val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
410 writel(val, clock->csn.reg);
412 clk = clk_register(NULL, &clock->hw);
416 cpg_simple_notifier_register(notifiers, &clock->csn);
425 struct clk_divider div;
426 struct clk_gate gate;
428 * One notifier covers both RPC and RPCD2 clocks as they are both
429 * controlled by the same RPCCKCR register...
431 struct cpg_simple_notifier csn;
434 static const struct clk_div_table cpg_rpcsrc_div_table[] = {
435 { 2, 5 }, { 3, 6 }, { 0, 0 },
438 static const struct clk_div_table cpg_rpc_div_table[] = {
439 { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
442 static struct clk * __init cpg_rpc_clk_register(const char *name,
443 void __iomem *base, const char *parent_name,
444 struct raw_notifier_head *notifiers)
446 struct rpc_clock *rpc;
449 rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
451 return ERR_PTR(-ENOMEM);
453 rpc->div.reg = base + CPG_RPCCKCR;
455 rpc->div.table = cpg_rpc_div_table;
456 rpc->div.lock = &cpg_lock;
458 rpc->gate.reg = base + CPG_RPCCKCR;
459 rpc->gate.bit_idx = 8;
460 rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
461 rpc->gate.lock = &cpg_lock;
463 rpc->csn.reg = base + CPG_RPCCKCR;
465 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
466 &rpc->div.hw, &clk_divider_ops,
467 &rpc->gate.hw, &clk_gate_ops, 0);
473 cpg_simple_notifier_register(notifiers, &rpc->csn);
478 struct clk_fixed_factor fixed;
479 struct clk_gate gate;
482 static struct clk * __init cpg_rpcd2_clk_register(const char *name,
484 const char *parent_name)
486 struct rpcd2_clock *rpcd2;
489 rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
491 return ERR_PTR(-ENOMEM);
493 rpcd2->fixed.mult = 1;
494 rpcd2->fixed.div = 2;
496 rpcd2->gate.reg = base + CPG_RPCCKCR;
497 rpcd2->gate.bit_idx = 9;
498 rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
499 rpcd2->gate.lock = &cpg_lock;
501 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
502 &rpcd2->fixed.hw, &clk_fixed_factor_ops,
503 &rpcd2->gate.hw, &clk_gate_ops, 0);
/* PLL configuration selected for this SoC, set in rcar_gen3_cpg_init() */
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
/* Index of the EXTALR clock in the core clocks array */
static unsigned int cpg_clk_extalr __initdata;
/* Mode pin (MD) state latched at boot */
static u32 cpg_mode __initdata;
515 static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
517 .soc_id = "r8a7795", .revision = "ES1.0",
518 .data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
521 .soc_id = "r8a7795", .revision = "ES1.*",
522 .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
525 .soc_id = "r8a7795", .revision = "ES2.0",
526 .data = (void *)SD_SKIP_FIRST,
529 .soc_id = "r8a7796", .revision = "ES1.0",
530 .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
533 .soc_id = "r8a7796", .revision = "ES1.1",
534 .data = (void *)SD_SKIP_FIRST,
539 struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
540 const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
541 struct clk **clks, void __iomem *base,
542 struct raw_notifier_head *notifiers)
544 const struct clk *parent;
545 unsigned int mult = 1;
546 unsigned int div = 1;
549 parent = clks[core->parent & 0xffff]; /* some types use high bits */
551 return ERR_CAST(parent);
553 switch (core->type) {
554 case CLK_TYPE_GEN3_MAIN:
555 div = cpg_pll_config->extal_div;
558 case CLK_TYPE_GEN3_PLL0:
560 * PLL0 is a configurable multiplier clock. Register it as a
561 * fixed factor clock for now as there's no generic multiplier
562 * clock implementation and we currently have no need to change
563 * the multiplier value.
565 value = readl(base + CPG_PLL0CR);
566 mult = (((value >> 24) & 0x7f) + 1) * 2;
567 if (cpg_quirks & PLL_ERRATA)
571 case CLK_TYPE_GEN3_PLL1:
572 mult = cpg_pll_config->pll1_mult;
573 div = cpg_pll_config->pll1_div;
576 case CLK_TYPE_GEN3_PLL2:
578 * PLL2 is a configurable multiplier clock. Register it as a
579 * fixed factor clock for now as there's no generic multiplier
580 * clock implementation and we currently have no need to change
581 * the multiplier value.
583 value = readl(base + CPG_PLL2CR);
584 mult = (((value >> 24) & 0x7f) + 1) * 2;
585 if (cpg_quirks & PLL_ERRATA)
589 case CLK_TYPE_GEN3_PLL3:
590 mult = cpg_pll_config->pll3_mult;
591 div = cpg_pll_config->pll3_div;
594 case CLK_TYPE_GEN3_PLL4:
596 * PLL4 is a configurable multiplier clock. Register it as a
597 * fixed factor clock for now as there's no generic multiplier
598 * clock implementation and we currently have no need to change
599 * the multiplier value.
601 value = readl(base + CPG_PLL4CR);
602 mult = (((value >> 24) & 0x7f) + 1) * 2;
603 if (cpg_quirks & PLL_ERRATA)
607 case CLK_TYPE_GEN3_SD:
608 return cpg_sd_clk_register(core->name, base, core->offset,
609 __clk_get_name(parent), notifiers);
611 case CLK_TYPE_GEN3_R:
612 if (cpg_quirks & RCKCR_CKSEL) {
613 struct cpg_simple_notifier *csn;
615 csn = kzalloc(sizeof(*csn), GFP_KERNEL);
617 return ERR_PTR(-ENOMEM);
619 csn->reg = base + CPG_RCKCR;
623 * Only if EXTALR is populated, we switch to it.
625 value = readl(csn->reg) & 0x3f;
627 if (clk_get_rate(clks[cpg_clk_extalr])) {
628 parent = clks[cpg_clk_extalr];
629 value |= CPG_RCKCR_CKSEL;
632 writel(value, csn->reg);
633 cpg_simple_notifier_register(notifiers, csn);
637 /* Select parent clock of RCLK by MD28 */
638 if (cpg_mode & BIT(28))
639 parent = clks[cpg_clk_extalr];
642 case CLK_TYPE_GEN3_MDSEL:
644 * Clock selectable between two parents and two fixed dividers
647 if (cpg_mode & BIT(core->offset)) {
648 div = core->div & 0xffff;
650 parent = clks[core->parent >> 16];
652 return ERR_CAST(parent);
653 div = core->div >> 16;
658 case CLK_TYPE_GEN3_Z:
659 return cpg_z_clk_register(core->name, __clk_get_name(parent),
660 base, core->div, core->offset);
662 case CLK_TYPE_GEN3_OSC:
664 * Clock combining OSC EXTAL predivider and a fixed divider
666 div = cpg_pll_config->osc_prediv * core->div;
669 case CLK_TYPE_GEN3_RCKSEL:
671 * Clock selectable between two parents and two fixed dividers
674 if (readl(base + CPG_RCKCR) & CPG_RCKCR_CKSEL) {
675 div = core->div & 0xffff;
677 parent = clks[core->parent >> 16];
679 return ERR_CAST(parent);
680 div = core->div >> 16;
684 case CLK_TYPE_GEN3_RPCSRC:
685 return clk_register_divider_table(NULL, core->name,
686 __clk_get_name(parent), 0,
687 base + CPG_RPCCKCR, 3, 2, 0,
688 cpg_rpcsrc_div_table,
691 case CLK_TYPE_GEN3_RPC:
692 return cpg_rpc_clk_register(core->name, base,
693 __clk_get_name(parent), notifiers);
695 case CLK_TYPE_GEN3_RPCD2:
696 return cpg_rpcd2_clk_register(core->name, base,
697 __clk_get_name(parent));
700 return ERR_PTR(-EINVAL);
703 return clk_register_fixed_factor(NULL, core->name,
704 __clk_get_name(parent), 0, mult, div);
707 int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
708 unsigned int clk_extalr, u32 mode)
710 const struct soc_device_attribute *attr;
712 cpg_pll_config = config;
713 cpg_clk_extalr = clk_extalr;
715 attr = soc_device_match(cpg_quirks_match);
717 cpg_quirks = (uintptr_t)attr->data;
718 pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
720 spin_lock_init(&cpg_lock);