drivers/clk/imx/clk-busy.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2012 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include "clk.h"

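/*
 * Poll the busy (handshake) bit at @shift in @reg until the hardware
 * clears it, giving up after roughly 10ms.  Returns 0 on success or
 * -ETIMEDOUT if the bit is still set when the timeout expires.
 */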
static int clk_busy_wait(void __iomem *reg, u8 shift)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	while (readl_relaxed(reg) & (1 << shift))
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

	return 0;
}

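/*
 * A clk_busy_divider wraps a basic clk_divider and additionally records
 * the register/bit the CCM uses to signal that a rate change is still
 * in progress.
 */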
struct clk_busy_divider {
	struct clk_divider div;
	const struct clk_ops *div_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_busy_divider, div);
}

static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}

static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
}

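/*
 * Program the divider through the standard divider ops, then wait for
 * the busy bit to clear so the call only returns once the new rate has
 * actually taken effect.
 */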
static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
	int ret;

	ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.round_rate = clk_busy_divider_round_rate,
	.set_rate = clk_busy_divider_set_rate,
};

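/*
 * Register a busy divider: @reg/@shift/@width describe the divider
 * field itself, while @busy_reg/@busy_shift point at the handshake
 * (busy) bit that is polled after every rate change.
 */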
struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
				 void __iomem *reg, u8 shift, u8 width,
				 void __iomem *busy_reg, u8 busy_shift)
{
	struct clk_busy_divider *busy;
	struct clk *clk;
	struct clk_init_data init;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->div.reg = reg;
	busy->div.shift = shift;
	busy->div.width = width;
	busy->div.lock = &imx_ccm_lock;
	busy->div_ops = &clk_divider_ops;

	init.name = name;
	init.ops = &clk_busy_divider_ops;
	init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	busy->div.hw.init = &init;

	clk = clk_register(NULL, &busy->div.hw);
	if (IS_ERR(clk))
		kfree(busy);

	return clk;
}

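/*
 * A clk_busy_mux wraps a basic clk_mux in the same way, pairing the mux
 * field with the busy bit that signals a parent switch in progress.
 */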
struct clk_busy_mux {
	struct clk_mux mux;
	const struct clk_ops *mux_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return container_of(mux, struct clk_busy_mux, mux);
}

static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);

	return busy->mux_ops->get_parent(&busy->mux.hw);
}

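/*
 * Switch the parent through the standard mux ops, then wait for the
 * busy bit to clear before reporting success.
 */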
static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
	int ret;

	ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_mux_ops = {
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};

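/*
 * Register a busy mux: @reg/@shift/@width describe the mux field,
 * @busy_reg/@busy_shift the handshake bit polled after every parent
 * change.
 */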
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
			     u8 width, void __iomem *busy_reg, u8 busy_shift,
			     const char * const *parent_names, int num_parents)
{
	struct clk_busy_mux *busy;
	struct clk *clk;
	struct clk_init_data init;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->mux.reg = reg;
	busy->mux.shift = shift;
	busy->mux.mask = BIT(width) - 1;
	busy->mux.lock = &imx_ccm_lock;
	busy->mux_ops = &clk_mux_ops;

	init.name = name;
	init.ops = &clk_busy_mux_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	busy->mux.hw.init = &init;

	clk = clk_register(NULL, &busy->mux.hw);
	if (IS_ERR(clk))
		kfree(busy);

	return clk;
}