Merge tag 'imx-fixes-5.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / display / dc / dcn10 / dcn10_clk_mgr.c
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include "dcn10_clk_mgr.h"
27
28 #include "reg_helper.h"
29 #include "core_types.h"
30
/* Recover the containing dce_clk_mgr from its embedded struct clk_mgr base. */
#define TO_DCE_CLK_MGR(clocks)\
	container_of(clocks, struct dce_clk_mgr, base)

/* Register accessor; expects a local 'clk_mgr_dce' (struct dce_clk_mgr *) in scope. */
#define REG(reg) \
	(clk_mgr_dce->regs->reg)

/* Shift/mask pair expansion for REG_* helpers; also relies on 'clk_mgr_dce'. */
#undef FN
#define FN(reg_name, field_name) \
	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name

#define CTX \
	clk_mgr_dce->base.ctx
/* Logging context; expects a local 'clk_mgr' (struct clk_mgr *) in scope. */
#define DC_LOGGER \
	clk_mgr->ctx->logger
46 void dcn1_pplib_apply_display_requirements(
47         struct dc *dc,
48         struct dc_state *context)
49 {
50         struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
51
52         pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
53         pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
54         pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
55         pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
56         pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
57         pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
58         dce110_fill_display_configs(context, pp_display_cfg);
59
60         dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
61 }
62
63 static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
64 {
65         bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
66         bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
67         int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
68         bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
69
70         /* increase clock, looking for div is 0 for current, request div is 1*/
71         if (dispclk_increase) {
72                 /* already divided by 2, no need to reach target clk with 2 steps*/
73                 if (cur_dpp_div)
74                         return new_clocks->dispclk_khz;
75
76                 /* request disp clk is lower than maximum supported dpp clk,
77                  * no need to reach target clk with two steps.
78                  */
79                 if (new_clocks->dispclk_khz <= disp_clk_threshold)
80                         return new_clocks->dispclk_khz;
81
82                 /* target dpp clk not request divided by 2, still within threshold */
83                 if (!request_dpp_div)
84                         return new_clocks->dispclk_khz;
85
86         } else {
87                 /* decrease clock, looking for current dppclk divided by 2,
88                  * request dppclk not divided by 2.
89                  */
90
91                 /* current dpp clk not divided by 2, no need to ramp*/
92                 if (!cur_dpp_div)
93                         return new_clocks->dispclk_khz;
94
95                 /* current disp clk is lower than current maximum dpp clk,
96                  * no need to ramp
97                  */
98                 if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
99                         return new_clocks->dispclk_khz;
100
101                 /* request dpp clk need to be divided by 2 */
102                 if (request_dpp_div)
103                         return new_clocks->dispclk_khz;
104         }
105
106         return disp_clk_threshold;
107 }
108
109 static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
110 {
111         struct dc *dc = clk_mgr->ctx->dc;
112         int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
113         bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
114         int i;
115
116         /* set disp clk to dpp clk threshold */
117         dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
118
119         /* update request dpp clk division option */
120         for (i = 0; i < dc->res_pool->pipe_count; i++) {
121                 struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
122
123                 if (!pipe_ctx->plane_state)
124                         continue;
125
126                 pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
127                                 pipe_ctx->plane_res.dpp,
128                                 request_dpp_div,
129                                 true);
130         }
131
132         /* If target clk not same as dppclk threshold, set to target clock */
133         if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
134                 dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
135
136         clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
137         clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
138         clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
139 }
140
141 static int get_active_display_cnt(
142                 struct dc *dc,
143                 struct dc_state *context)
144 {
145         int i, display_count;
146
147         display_count = 0;
148         for (i = 0; i < context->stream_count; i++) {
149                 const struct dc_stream_state *stream = context->streams[i];
150
151                 /*
152                  * Only notify active stream or virtual stream.
153                  * Need to notify virtual stream to work around
154                  * headless case. HPD does not fire when system is in
155                  * S0i2.
156                  */
157                 if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
158                         display_count++;
159         }
160
161         return display_count;
162 }
163
164 static void notify_deep_sleep_dcfclk_to_smu(
165                 struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
166 {
167         int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
168         /*
169          * if function pointer not set up, this message is
170          * sent as part of pplib_apply_display_requirements.
171          * So just return.
172          */
173         if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
174                 return;
175
176         min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
177         pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
178 }
179
180 static void notify_hard_min_dcfclk_to_smu(
181                 struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
182 {
183         int min_dcf_clk_mhz; //minimum required DCF clock in mhz
184
185         /*
186          * if function pointer not set up, this message is
187          * sent as part of pplib_apply_display_requirements.
188          * So just return.
189          */
190         if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
191                 return;
192
193         min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
194
195         pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
196 }
197
198 static void notify_hard_min_fclk_to_smu(
199                 struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
200 {
201         int min_f_clk_mhz; //minimum required F clock in mhz
202
203         /*
204          * if function pointer not set up, this message is
205          * sent as part of pplib_apply_display_requirements.
206          * So just return.
207          */
208         if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
209                 return;
210
211         min_f_clk_mhz = min_f_clk_khz / 1000;
212
213         pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
214 }
215
216 static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
217                         struct dc_state *context,
218                         bool safe_to_lower)
219 {
220         struct dc *dc = clk_mgr->ctx->dc;
221         struct dc_clocks *new_clocks = &context->bw.dcn.clk;
222         struct pp_smu_display_requirement_rv *smu_req_cur =
223                         &dc->res_pool->pp_smu_req;
224         struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
225         struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
226         uint32_t requested_dcf_clock_in_khz = 0;
227         bool send_request_to_increase = false;
228         bool send_request_to_lower = false;
229         int display_count;
230
231         bool enter_display_off = false;
232
233         display_count = get_active_display_cnt(dc, context);
234
235         if (display_count == 0)
236                 enter_display_off = true;
237
238         if (enter_display_off == safe_to_lower) {
239                 /*
240                  * Notify SMU active displays
241                  * if function pointer not set up, this message is
242                  * sent as part of pplib_apply_display_requirements.
243                  */
244                 if (pp_smu->set_display_count)
245                         pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
246                 else
247                         smu_req.display_count = display_count;
248
249         }
250
251         if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
252                         || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
253                         || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
254                         || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
255                 send_request_to_increase = true;
256
257         if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
258                 clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
259
260                 send_request_to_lower = true;
261         }
262
263         // F Clock
264         if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
265                 clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
266                 smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
267
268                 notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
269
270                 send_request_to_lower = true;
271         }
272
273         //DCF Clock
274         if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
275                 clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
276                 smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
277
278                 send_request_to_lower = true;
279         }
280
281         if (should_set_clock(safe_to_lower,
282                         new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
283                 clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
284                 smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
285
286                 send_request_to_lower = true;
287         }
288
289         /* make sure dcf clk is before dpp clk to
290          * make sure we have enough voltage to run dpp clk
291          */
292         if (send_request_to_increase) {
293                 /*use dcfclk to request voltage*/
294                 requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
295
296                 notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
297
298                 if (pp_smu->set_display_requirement)
299                         pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
300
301                 notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
302                 dcn1_pplib_apply_display_requirements(dc, context);
303         }
304
305         /* dcn1 dppclk is tied to dispclk */
306         /* program dispclk on = as a w/a for sleep resume clock ramping issues */
307         if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
308                         || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
309                 dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
310                 clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
311
312                 send_request_to_lower = true;
313         }
314
315         if (!send_request_to_increase && send_request_to_lower) {
316                 /*use dcfclk to request voltage*/
317                 requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
318
319                 notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
320
321                 if (pp_smu->set_display_requirement)
322                         pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
323
324                 notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
325                 dcn1_pplib_apply_display_requirements(dc, context);
326         }
327
328
329         *smu_req_cur = smu_req;
330 }
/* DCN1.x clock manager vtable; the DP ref clock query is shared with DCE12. */
static const struct clk_mgr_funcs dcn1_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn1_update_clocks
};
335 struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
336 {
337         struct dc_debug_options *debug = &ctx->dc->debug;
338         struct dc_bios *bp = ctx->dc_bios;
339         struct dc_firmware_info fw_info = { { 0 } };
340         struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
341
342         if (clk_mgr_dce == NULL) {
343                 BREAK_TO_DEBUGGER();
344                 return NULL;
345         }
346
347         clk_mgr_dce->base.ctx = ctx;
348         clk_mgr_dce->base.funcs = &dcn1_funcs;
349
350         clk_mgr_dce->dfs_bypass_disp_clk = 0;
351
352         clk_mgr_dce->dprefclk_ss_percentage = 0;
353         clk_mgr_dce->dprefclk_ss_divider = 1000;
354         clk_mgr_dce->ss_on_dprefclk = false;
355
356         clk_mgr_dce->dprefclk_khz = 600000;
357         if (bp->integrated_info)
358                 clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
359         if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
360                 bp->funcs->get_firmware_info(bp, &fw_info);
361                 clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
362                 if (clk_mgr_dce->dentist_vco_freq_khz == 0)
363                         clk_mgr_dce->dentist_vco_freq_khz = 3600000;
364         }
365
366         if (!debug->disable_dfs_bypass && bp->integrated_info)
367                 if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
368                         clk_mgr_dce->dfs_bypass_enabled = true;
369
370         dce_clock_read_ss_info(clk_mgr_dce);
371
372         return &clk_mgr_dce->base;
373 }
374
375