drivers/clk/sunxi-ng/ccu_mult.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mult.h"

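/* Scratch state for multiplier selection: the chosen factor and its bounds. */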
struct _ccu_mult {
        unsigned long   mult, min, max;
};

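/* Pick the integer ratio rate / parent, clamped to the [min, max] bounds. */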
static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
                               struct _ccu_mult *mult)
{
        int _mult;

        _mult = rate / parent;
        if (_mult < mult->min)
                _mult = mult->min;

        if (_mult > mult->max)
                _mult = mult->max;

        mult->mult = _mult;
}

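/*
 * Round-rate callback handed to the mux helper: when no explicit maximum is
 * configured, the largest factor encodable in the register field is used.
 */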
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
                                         struct clk_hw *parent,
                                         unsigned long *parent_rate,
                                         unsigned long rate,
                                         void *data)
{
        struct ccu_mult *cm = data;
        struct _ccu_mult _cm;

        _cm.min = cm->mult.min;

        if (cm->mult.max)
                _cm.max = cm->mult.max;
        else
                _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;

        ccu_mult_find_best(*parent_rate, rate, &_cm);

        return *parent_rate * _cm.mult;
}

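/* Gate control simply forwards to the shared ccu_gate helpers. */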
static void ccu_mult_disable(struct clk_hw *hw)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);

        return ccu_gate_helper_disable(&cm->common, cm->enable);
}

static int ccu_mult_enable(struct clk_hw *hw)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);

        return ccu_gate_helper_enable(&cm->common, cm->enable);
}

static int ccu_mult_is_enabled(struct clk_hw *hw)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);

        return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
}

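/*
 * An enabled fractional mode takes precedence over the N factor; otherwise
 * the multiplier is read back from its register field, with the parent rate
 * first adjusted for any pre-divider on the currently selected mux input.
 */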
static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);
        unsigned long val;
        u32 reg;

        if (ccu_frac_helper_is_enabled(&cm->common, &cm->frac))
                return ccu_frac_helper_read_rate(&cm->common, &cm->frac);

        reg = readl(cm->common.base + cm->common.reg);
        val = reg >> cm->mult.shift;
        val &= (1 << cm->mult.width) - 1;

        parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
                                                  parent_rate);

        return parent_rate * (val + cm->mult.offset);
}

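/* Rate requests are resolved by the mux helper using ccu_mult_round_rate. */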
static int ccu_mult_determine_rate(struct clk_hw *hw,
                                   struct clk_rate_request *req)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);

        return ccu_mux_helper_determine_rate(&cm->common, &cm->mux,
                                             req, ccu_mult_round_rate, cm);
}

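/*
 * Use the fractional rates when one matches the request; otherwise program
 * the N field under the register spinlock and, if a lock bit is defined,
 * wait for the PLL to report lock.
 */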
static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);
        struct _ccu_mult _cm;
        unsigned long flags;
        u32 reg;

        if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) {
                ccu_frac_helper_enable(&cm->common, &cm->frac);

                return ccu_frac_helper_set_rate(&cm->common, &cm->frac,
                                                rate, cm->lock);
        } else {
                ccu_frac_helper_disable(&cm->common, &cm->frac);
        }

        parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
                                                  parent_rate);

        _cm.min = cm->mult.min;

        if (cm->mult.max)
                _cm.max = cm->mult.max;
        else
                _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;

        ccu_mult_find_best(parent_rate, rate, &_cm);

        spin_lock_irqsave(cm->common.lock, flags);

        reg = readl(cm->common.base + cm->common.reg);
        reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
        reg |= ((_cm.mult - cm->mult.offset) << cm->mult.shift);

        writel(reg, cm->common.base + cm->common.reg);

        spin_unlock_irqrestore(cm->common.lock, flags);

        ccu_helper_wait_for_lock(&cm->common, cm->lock);

        return 0;
}

static u8 ccu_mult_get_parent(struct clk_hw *hw)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);

        return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
}

static int ccu_mult_set_parent(struct clk_hw *hw, u8 index)
{
        struct ccu_mult *cm = hw_to_ccu_mult(hw);

        return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
}

const struct clk_ops ccu_mult_ops = {
        .disable        = ccu_mult_disable,
        .enable         = ccu_mult_enable,
        .is_enabled     = ccu_mult_is_enabled,

        .get_parent     = ccu_mult_get_parent,
        .set_parent     = ccu_mult_set_parent,

        .determine_rate = ccu_mult_determine_rate,
        .recalc_rate    = ccu_mult_recalc_rate,
        .set_rate       = ccu_mult_set_rate,
};